[med-svn] [python-skbio] 12/13: Imported Upstream version 0.4.0

Andreas Tille tille at debian.org
Thu Jul 30 15:46:27 UTC 2015


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository python-skbio.

commit c4a696f744fef520e161dca1cc038464395466b1
Author: Andreas Tille <tille at debian.org>
Date:   Thu Jul 30 17:42:46 2015 +0200

    Imported Upstream version 0.4.0
---
 .coveragerc                                        |    2 -
 .travis.yml                                        |   32 +-
 CHANGELOG.md                                       |   68 +
 CONTRIBUTING.md                                    |  157 +-
 MANIFEST.in                                        |    3 +-
 skbio/format/__init__.py => Makefile               |   15 +-
 README.rst                                         |  137 +-
 RELEASE.md                                         |   11 +-
 assets/logo-inverted.svg                           |   76 +
 checklist.py                                       |  108 +-
 doc/README.md                                      |   58 +-
 doc/source/_static/style.css                       |    1 +
 doc/source/_templates/autosummary/class.rst        |    4 +-
 doc/source/conf.py                                 |   58 +-
 doc/source/development/coding_guidelines.rst       |   11 +-
 doc/source/development/new_module.rst              |    6 +-
 doc/source/development/py3.rst                     |    4 +-
 doc/source/format.sequences.rst                    |    1 -
 doc/source/index.rst                               |   16 +-
 doc/source/parse.sequences.rst                     |    1 -
 doc/source/user/api_stability.rst                  |   69 +
 doc/source/user/assets/api-lifecycle.png           |  Bin 0 -> 40262 bytes
 doc/sphinxext/numpydoc/LICENSE.txt                 |   94 -
 doc/sphinxext/numpydoc/README.rst                  |   54 -
 doc/sphinxext/numpydoc/numpydoc/__init__.py        |    3 -
 doc/sphinxext/numpydoc/numpydoc/comment_eater.py   |  169 -
 .../numpydoc/numpydoc/compiler_unparse.py          |  865 ---
 doc/sphinxext/numpydoc/numpydoc/docscrape.py       |  525 --
 .../numpydoc/numpydoc/docscrape_sphinx.py          |  274 -
 doc/sphinxext/numpydoc/numpydoc/linkcode.py        |   83 -
 doc/sphinxext/numpydoc/numpydoc/numpydoc.py        |  187 -
 doc/sphinxext/numpydoc/numpydoc/phantom_import.py  |  167 -
 doc/sphinxext/numpydoc/numpydoc/plot_directive.py  |  642 ---
 .../numpydoc/numpydoc/tests/test_docscrape.py      |  767 ---
 .../numpydoc/numpydoc/tests/test_linkcode.py       |    5 -
 .../numpydoc/numpydoc/tests/test_phantom_import.py |   12 -
 .../numpydoc/numpydoc/tests/test_plot_directive.py |   11 -
 .../numpydoc/numpydoc/tests/test_traitsdoc.py      |   11 -
 doc/sphinxext/numpydoc/numpydoc/traitsdoc.py       |  142 -
 .../README.md                                      |    0
 .../dm.txt                                         |    0
 .../map.txt                                        |    0
 .../scikit-bio presentation.ipynb                  |    0
 .../smalldm.txt                                    |    0
 .../style.css                                      |    0
 .../talktools.py                                   |    0
 licenses/flask.txt                                 |   33 +
 licenses/numpydoc.txt                              |    1 -
 licenses/python.txt                                |  270 +
 setup.py                                           |   53 +-
 skbio/__init__.py                                  |   43 +-
 skbio/alignment/__init__.py                        |   75 +-
 skbio/alignment/_alignment.py                      | 1411 ++---
 skbio/alignment/_exception.py                      |    9 +-
 skbio/alignment/_lib/__init__.py                   |    6 +-
 skbio/alignment/_pairwise.py                       |  158 +-
 skbio/alignment/_ssw_wrapper.c                     | 5948 +++++++++-----------
 skbio/alignment/_ssw_wrapper.pyx                   |   97 +-
 skbio/alignment/tests/__init__.py                  |    2 +
 skbio/alignment/tests/test_alignment.py            |  707 +--
 skbio/alignment/tests/test_pairwise.py             |  138 +-
 skbio/alignment/tests/test_ssw.py                  |   25 +-
 skbio/diversity/__init__.py                        |    6 +-
 skbio/diversity/alpha/__init__.py                  |    6 +-
 skbio/diversity/alpha/_ace.py                      |    7 +-
 skbio/diversity/alpha/_base.py                     |   25 +
 skbio/diversity/alpha/_chao1.py                    |    8 +-
 skbio/diversity/alpha/_gini.py                     |    7 +-
 skbio/diversity/alpha/_lladser.py                  |   11 +-
 skbio/diversity/alpha/tests/__init__.py            |    4 +-
 skbio/diversity/alpha/tests/test_ace.py            |    5 +-
 skbio/diversity/alpha/tests/test_base.py           |    5 +-
 skbio/diversity/alpha/tests/test_chao1.py          |    5 +-
 skbio/diversity/alpha/tests/test_gini.py           |    5 +-
 skbio/diversity/beta/__init__.py                   |   13 +-
 skbio/diversity/beta/_base.py                      |   17 +-
 skbio/diversity/beta/tests/__init__.py             |    2 +
 skbio/diversity/beta/tests/test_base.py            |    4 +-
 skbio/draw/__init__.py                             |    6 +-
 skbio/draw/_distributions.py                       |   16 +-
 skbio/draw/tests/__init__.py                       |    4 +-
 skbio/draw/tests/test_distributions.py             |   45 +-
 skbio/format/sequences/__init__.py                 |   39 -
 skbio/format/sequences/fasta.py                    |  176 -
 skbio/format/sequences/fastq.py                    |   78 -
 skbio/format/sequences/tests/test_fasta.py         |   74 -
 skbio/format/sequences/tests/test_fastq.py         |   39 -
 skbio/io/__init__.py                               |  325 +-
 skbio/io/_exception.py                             |   33 +-
 skbio/io/_fileobject.py                            |  139 +
 skbio/io/_iosources.py                             |  237 +
 skbio/io/_registry.py                              |  818 ---
 skbio/io/_warning.py                               |    4 +-
 skbio/{parse => io/format}/__init__.py             |    9 +-
 skbio/io/{ => format}/_base.py                     |   88 +-
 skbio/io/{ => format}/clustal.py                   |  113 +-
 skbio/io/format/emptyfile.py                       |   43 +
 skbio/io/{ => format}/fasta.py                     |  662 ++-
 skbio/io/{ => format}/fastq.py                     |  300 +-
 skbio/io/{ => format}/lsmat.py                     |   37 +-
 skbio/io/{ => format}/newick.py                    |   25 +-
 skbio/io/{ => format}/ordination.py                |   21 +-
 skbio/io/{ => format}/phylip.py                    |   42 +-
 skbio/io/{ => format}/qseq.py                      |  105 +-
 .../sequences => io/format}/tests/__init__.py      |    4 +-
 skbio/io/{ => format}/tests/data/empty             |    0
 .../{ => format}/tests/data/error_diff_ids.fastq   |    0
 .../tests/data/error_double_qual.fastq             |    0
 .../{ => format}/tests/data/error_double_seq.fastq |    0
 .../{ => format}/tests/data/error_long_qual.fastq  |    0
 .../io/{ => format}/tests/data/error_no_qual.fastq |    0
 .../{ => format}/tests/data/error_qual_del.fastq   |    0
 .../tests/data/error_qual_escape.fastq             |    0
 .../{ => format}/tests/data/error_qual_null.fastq  |  Bin
 .../{ => format}/tests/data/error_qual_space.fastq |    0
 .../{ => format}/tests/data/error_qual_tab.fastq   |    0
 .../tests/data/error_qual_unit_sep.fastq           |    0
 .../{ => format}/tests/data/error_qual_vtab.fastq  |    0
 .../{ => format}/tests/data/error_short_qual.fastq |    0
 .../io/{ => format}/tests/data/error_spaces.fastq  |    0
 skbio/io/{ => format}/tests/data/error_tabs.fastq  |    0
 .../tests/data/error_trunc_at_plus.fastq           |    0
 .../tests/data/error_trunc_at_qual.fastq           |    0
 .../tests/data/error_trunc_at_seq.fastq            |    0
 .../tests/data/error_trunc_in_plus.fastq           |    0
 .../tests/data/error_trunc_in_qual.fastq           |    0
 .../tests/data/error_trunc_in_seq.fastq            |    0
 .../tests/data/error_trunc_in_title.fastq          |    0
 skbio/io/{ => format}/tests/data/fasta_10_seqs     |    0
 .../{ => format}/tests/data/fasta_3_seqs_defaults  |    0
 .../tests/data/fasta_3_seqs_non_defaults           |    0
 .../tests/data/fasta_5_blanks_start_of_file}       |    7 +-
 .../tests/data/fasta_5_ws_lines_start_of_file}     |    7 +-
 .../tests/data/fasta_6_blanks_start_of_file}       |    8 +-
 .../tests/data/fasta_6_ws_lines_start_of_file}     |    8 +-
 .../tests/data/fasta_blank_lines_between_records}  |   14 +-
 .../tests/data/fasta_blanks_end_of_file}           |    9 +-
 ...fasta_description_newline_replacement_empty_str |    0
 ...asta_description_newline_replacement_multi_char |    0
 .../fasta_description_newline_replacement_none     |    0
 .../data/fasta_id_whitespace_replacement_empty_str |    0
 .../fasta_id_whitespace_replacement_multi_char     |    0
 .../data/fasta_id_whitespace_replacement_none      |    0
 .../tests/data/fasta_invalid_after_10_seqs         |    0
 .../data/fasta_invalid_blank_line_after_header}    |    2 +-
 .../data/fasta_invalid_blank_line_within_sequence} |    3 +-
 .../tests/data/fasta_invalid_blank_sequence}       |    1 +
 .../tests/data/fasta_invalid_legacy_format         |    0
 .../tests/data/fasta_invalid_missing_header        |    0
 .../data/fasta_invalid_missing_seq_data_first      |    0
 .../tests/data/fasta_invalid_missing_seq_data_last |    0
 .../data/fasta_invalid_missing_seq_data_middle     |    0
 .../fasta_invalid_whitespace_line_after_header}    |    2 +-
 ...a_invalid_whitespace_only_line_within_sequence} |    1 +
 .../data/fasta_invalid_whitespace_only_sequence}   |    1 -
 skbio/io/{ => format}/tests/data/fasta_max_width_1 |    0
 skbio/io/{ => format}/tests/data/fasta_max_width_5 |    3 -
 .../tests/data/fasta_mixed_qual_scores             |    0
 skbio/io/{ => format}/tests/data/fasta_multi_seq   |    2 -
 .../tests/data/fasta_multi_seq_roundtrip           |    0
 .../tests/data/fasta_prot_seqs_odd_labels          |    0
 .../data/fasta_sequence_collection_different_type  |    6 +-
 .../tests/data/fasta_single_bio_seq_defaults       |    2 +-
 .../tests/data/fasta_single_bio_seq_non_defaults   |    4 +-
 .../tests/data/fasta_single_dna_seq_defaults       |    2 +-
 .../tests/data/fasta_single_dna_seq_non_defaults   |    4 +-
 .../tests/data/fasta_single_prot_seq_defaults      |    2 +-
 .../tests/data/fasta_single_prot_seq_non_defaults  |    2 +-
 .../tests/data/fasta_single_rna_seq_defaults       |    2 +-
 .../tests/data/fasta_single_rna_seq_non_defaults   |    4 +-
 skbio/io/{ => format}/tests/data/fasta_single_seq  |    0
 .../tests/data/fasta_ws_lines_between_records}     |   13 +-
 .../tests/data/fasta_ws_lines_end_of_file}         |   10 +-
 .../tests/data/fastq_5_blanks_start_of_file}       |    5 +
 .../tests/data/fastq_5_ws_lines_start_of_file}     |    5 +
 .../tests/data/fastq_blank_lines}                  |    3 +
 .../tests/data/fastq_invalid_blank_after_header}   |    1 +
 .../tests/data/fastq_invalid_blank_after_plus}     |    1 +
 .../tests/data/fastq_invalid_blank_after_seq}      |    1 +
 .../data/fastq_invalid_blank_in_seq_at_symbol}     |    4 +-
 .../tests/data/fastq_invalid_blank_within_qual}    |    4 +-
 .../tests/data/fastq_invalid_blank_within_seq}     |    4 +-
 .../tests/data/fastq_invalid_missing_header        |    0
 .../tests/data/fastq_invalid_missing_seq_data      |    0
 .../tests/data/fastq_invalid_ws_line_after_header} |    1 +
 .../tests/data/fastq_invalid_ws_line_after_plus}   |    1 +
 .../tests/data/fastq_invalid_ws_line_after_seq}    |    1 +
 .../tests/data/fastq_invalid_ws_line_within_qual}  |    4 +-
 .../tests/data/fastq_invalid_ws_line_within_seq}   |    4 +-
 .../tests/data/fastq_multi_blank_between_records}  |    8 +
 .../tests/data/fastq_multi_blank_end_of_file}      |    7 +
 .../tests/data/fastq_multi_blank_start_of_file}    |    6 +
 .../{ => format}/tests/data/fastq_multi_seq_sanger |    0
 .../tests/data/fastq_multi_whitespace_stripping    |   14 +
 .../data/fastq_multi_ws_lines_between_records}     |    6 +
 .../tests/data/fastq_multi_ws_lines_end_of_file    |   20 +
 .../tests/data/fastq_multi_ws_lines_start_of_file} |    6 +
 .../tests/data/fastq_single_seq_illumina1.3        |    2 +-
 .../tests/data/fastq_whitespace_only_lines}        |    3 +
 .../data/fastq_wrapping_as_illumina_no_description |    0
 .../data/fastq_wrapping_as_sanger_no_description   |    0
 .../fastq_wrapping_original_sanger_no_description  |    0
 .../tests/data/fastq_writer_illumina1.3_defaults   |    6 +-
 .../tests/data/fastq_writer_sanger_defaults        |    6 +-
 .../tests/data/fastq_writer_sanger_non_defaults    |    6 +-
 .../data/illumina_full_range_as_illumina.fastq     |    0
 .../tests/data/illumina_full_range_as_sanger.fastq |    0
 .../illumina_full_range_original_illumina.fastq    |    0
 .../tests/data/longreads_as_illumina.fastq         |    0
 .../tests/data/longreads_as_sanger.fastq           |    0
 .../tests/data/longreads_original_sanger.fastq     |    0
 .../tests/data/misc_dna_as_illumina.fastq          |    0
 .../tests/data/misc_dna_as_sanger.fastq            |    0
 .../tests/data/misc_dna_original_sanger.fastq      |    0
 .../tests/data/misc_rna_as_illumina.fastq          |    0
 .../tests/data/misc_rna_as_sanger.fastq            |    0
 .../tests/data/misc_rna_original_sanger.fastq      |    0
 .../tests/data/ordination_L&L_CA_data_scores       |    0
 .../data/ordination_PCoA_sample_data_3_scores      |    0
 skbio/io/{ => format}/tests/data/ordination_error1 |    0
 .../io/{ => format}/tests/data/ordination_error10  |    0
 .../io/{ => format}/tests/data/ordination_error11  |    0
 .../io/{ => format}/tests/data/ordination_error12  |    0
 .../io/{ => format}/tests/data/ordination_error13  |    0
 .../io/{ => format}/tests/data/ordination_error14  |    0
 .../io/{ => format}/tests/data/ordination_error15  |    0
 .../io/{ => format}/tests/data/ordination_error16  |    0
 .../io/{ => format}/tests/data/ordination_error17  |    0
 .../io/{ => format}/tests/data/ordination_error18  |    0
 .../io/{ => format}/tests/data/ordination_error19  |    0
 skbio/io/{ => format}/tests/data/ordination_error2 |    0
 .../io/{ => format}/tests/data/ordination_error20  |    0
 .../io/{ => format}/tests/data/ordination_error21  |    0
 .../io/{ => format}/tests/data/ordination_error22  |    0
 .../io/{ => format}/tests/data/ordination_error23  |    0
 .../io/{ => format}/tests/data/ordination_error24  |    0
 skbio/io/{ => format}/tests/data/ordination_error3 |    0
 skbio/io/{ => format}/tests/data/ordination_error4 |    0
 skbio/io/{ => format}/tests/data/ordination_error5 |    0
 skbio/io/{ => format}/tests/data/ordination_error6 |    0
 skbio/io/{ => format}/tests/data/ordination_error7 |    0
 skbio/io/{ => format}/tests/data/ordination_error8 |    0
 skbio/io/{ => format}/tests/data/ordination_error9 |    0
 .../tests/data/ordination_example2_scores          |    0
 .../tests/data/ordination_example3_scores          |    0
 .../tests/data/ordination_exp_Ordination_CCA_site  |    0
 .../ordination_exp_Ordination_CCA_site_constraints |    0
 .../data/ordination_exp_Ordination_CCA_species     |    0
 .../tests/data/ordination_exp_Ordination_PCoA_site |    0
 .../tests/data/ordination_exp_Ordination_RDA_site  |    0
 .../ordination_exp_Ordination_RDA_site_constraints |    0
 .../data/ordination_exp_Ordination_RDA_species     |    0
 skbio/io/{ => format}/tests/data/phylip_dna_3_seqs |    0
 .../{ => format}/tests/data/phylip_single_seq_long |    0
 .../tests/data/phylip_single_seq_short             |    0
 skbio/io/{ => format}/tests/data/phylip_two_chunks |    0
 .../tests/data/phylip_variable_length_ids          |    0
 .../io/{ => format}/tests/data/qseq_invalid_filter |    0
 skbio/io/{ => format}/tests/data/qseq_invalid_lane |    0
 skbio/io/{ => format}/tests/data/qseq_invalid_read |    0
 skbio/io/{ => format}/tests/data/qseq_invalid_tile |    0
 skbio/io/{ => format}/tests/data/qseq_invalid_x    |    0
 skbio/io/{ => format}/tests/data/qseq_invalid_y    |    0
 .../tests/data/qseq_multi_seq_illumina1.3          |    0
 .../{ => format}/tests/data/qseq_single_seq_sanger |    0
 .../{ => format}/tests/data/qual_2_seqs_defaults   |    0
 .../{ => format}/tests/data/qual_3_seqs_defaults   |    0
 .../tests/data/qual_3_seqs_defaults_desc_mismatch  |    0
 .../tests/data/qual_3_seqs_defaults_extra          |    0
 .../tests/data/qual_3_seqs_defaults_id_mismatch    |    0
 .../data/qual_3_seqs_defaults_length_mismatch      |    0
 .../tests/data/qual_3_seqs_non_defaults            |    0
 .../tests/data/qual_5_blanks_start_of_file}        |   13 +-
 .../tests/data/qual_5_ws_lines_start_of_file       |   17 +
 .../tests/data/qual_6_blanks_start_of_file}        |   14 +-
 .../tests/data/qual_6_ws_lines_start_of_file       |   18 +
 .../tests/data/qual_blank_lines_between_records}   |   18 +-
 .../tests/data/qual_blanks_end_of_file}            |   15 +-
 .../qual_description_newline_replacement_empty_str |    2 +-
 ...qual_description_newline_replacement_multi_char |    2 +-
 .../data/qual_description_newline_replacement_none |    2 +-
 .../data/qual_id_whitespace_replacement_empty_str  |    0
 .../data/qual_id_whitespace_replacement_multi_char |    0
 .../tests/data/qual_id_whitespace_replacement_none |    0
 .../data/qual_invalid_blank_line_after_header}     |    2 +-
 .../tests/data/qual_invalid_blank_line_within_seq} |    3 +-
 .../tests/data/qual_invalid_blank_sequence}        |    1 +
 .../tests/data/qual_invalid_legacy_format          |    0
 .../tests/data/qual_invalid_missing_header         |    0
 .../data/qual_invalid_missing_qual_scores_first    |    0
 .../data/qual_invalid_missing_qual_scores_last     |    0
 .../data/qual_invalid_missing_qual_scores_middle   |    0
 .../tests/data/qual_invalid_qual_scores_float      |    0
 .../tests/data/qual_invalid_qual_scores_negative   |    0
 .../tests/data/qual_invalid_qual_scores_over_255}  |    2 +-
 .../tests/data/qual_invalid_qual_scores_string     |    0
 .../data/qual_invalid_whitespace_line_in_seq}      |    3 +-
 .../data/qual_invalid_whitespace_only_sequence}    |    1 +
 .../tests/data/qual_invalid_ws_line_after_header}  |    2 +-
 skbio/io/{ => format}/tests/data/qual_max_width_1  |    4 +-
 skbio/io/{ => format}/tests/data/qual_max_width_5  |   21 +-
 skbio/io/{ => format}/tests/data/qual_multi_seq    |    8 +-
 .../tests/data/qual_multi_seq_roundtrip            |    0
 .../tests/data/qual_prot_seqs_odd_labels           |    2 +-
 .../data/qual_sequence_collection_different_type   |    2 +-
 .../tests/data/qual_single_bio_seq_non_defaults    |    0
 .../tests/data/qual_single_dna_seq_non_defaults    |    0
 .../tests/data/qual_single_nuc_seq_non_defaults    |    0
 .../tests/data/qual_single_prot_seq_non_defaults   |    0
 .../tests/data/qual_single_rna_seq_non_defaults    |    0
 skbio/io/format/tests/data/qual_single_seq         |    2 +
 .../tests/data/qual_ws_lines_between_records       |   22 +
 .../io/format/tests/data/qual_ws_lines_end_of_file |   20 +
 .../tests/data/sanger_full_range_as_illumina.fastq |    0
 .../tests/data/sanger_full_range_as_sanger.fastq   |    0
 .../data/sanger_full_range_original_sanger.fastq   |    0
 .../data/solexa_full_range_original_solexa.fastq   |    0
 skbio/io/{ => format}/tests/data/tsv_10_fields     |    0
 skbio/io/{ => format}/tests/data/tsv_8_fields      |    0
 skbio/io/{ => format}/tests/data/whitespace_only   |    0
 .../tests/data/wrapping_as_illumina.fastq          |    0
 .../tests/data/wrapping_as_sanger.fastq            |    0
 .../tests/data/wrapping_original_sanger.fastq      |    0
 skbio/io/{ => format}/tests/test_base.py           |   92 +-
 skbio/io/{ => format}/tests/test_clustal.py        |  120 +-
 skbio/io/format/tests/test_emptyfile.py            |   38 +
 skbio/io/{ => format}/tests/test_fasta.py          |  671 ++-
 skbio/io/{ => format}/tests/test_fastq.py          |  365 +-
 skbio/io/{ => format}/tests/test_lsmat.py          |    9 +-
 skbio/io/{ => format}/tests/test_newick.py         |    8 +-
 skbio/io/{ => format}/tests/test_ordination.py     |   15 +-
 skbio/io/{ => format}/tests/test_phylip.py         |   48 +-
 skbio/io/{ => format}/tests/test_qseq.py           |  210 +-
 skbio/io/registry.py                               | 1145 ++++
 skbio/io/tests/__init__.py                         |    2 +
 skbio/io/tests/data/big5_file                      |    1 +
 skbio/io/tests/data/big5_file.bz2                  |  Bin 0 -> 46 bytes
 skbio/io/tests/data/big5_file.gz                   |  Bin 0 -> 35 bytes
 skbio/io/tests/data/example_file                   |    2 +
 skbio/io/tests/data/example_file.bz2               |  Bin 0 -> 84 bytes
 skbio/io/tests/data/example_file.gz                |  Bin 0 -> 83 bytes
 skbio/io/tests/data/fasta_single_nuc_seq_defaults  |    2 -
 .../tests/data/fasta_single_nuc_seq_non_defaults   |    6 -
 skbio/io/tests/data/qual_single_seq                |    2 -
 skbio/io/tests/test_iosources.py                   |   53 +
 skbio/io/tests/test_registry.py                    | 1554 +++--
 skbio/io/tests/test_util.py                        |  643 ++-
 skbio/io/util.py                                   |  276 +-
 skbio/parse/record.py                              |  491 --
 skbio/parse/record_finder.py                       |  193 -
 skbio/parse/sequences/__init__.py                  |  201 -
 skbio/parse/sequences/clustal.py                   |  100 -
 skbio/parse/sequences/factory.py                   |  147 -
 skbio/parse/sequences/fasta.py                     |  240 -
 skbio/parse/sequences/fastq.py                     |  176 -
 skbio/parse/sequences/iterator.py                  |  206 -
 skbio/parse/sequences/tests/__init__.py            |    9 -
 skbio/parse/sequences/tests/data/fna1.fasta        |    4 -
 skbio/parse/sequences/tests/data/fna1.fna.gz       |  Bin 49 -> 0 bytes
 skbio/parse/sequences/tests/data/fna1.qual         |    4 -
 skbio/parse/sequences/tests/data/fq1.fastq.gz      |  Bin 60 -> 0 bytes
 skbio/parse/sequences/tests/data/fq1.fq            |    8 -
 skbio/parse/sequences/tests/data/noextensionfasta  |    4 -
 skbio/parse/sequences/tests/data/qs1.qseq.gz       |  Bin 91 -> 0 bytes
 skbio/parse/sequences/tests/test_clustal.py        |  155 -
 skbio/parse/sequences/tests/test_factory.py        |  201 -
 skbio/parse/sequences/tests/test_fasta.py          |  196 -
 skbio/parse/sequences/tests/test_fastq.py          |  223 -
 skbio/parse/sequences/tests/test_iterator.py       |  336 --
 skbio/parse/tests/__init__.py                      |    9 -
 skbio/parse/tests/test_record.py                   |  550 --
 skbio/parse/tests/test_record_finder.py            |  257 -
 skbio/sequence/__init__.py                         |  369 +-
 skbio/sequence/_base.py                            |   43 +
 skbio/sequence/_dna.py                             |  410 ++
 skbio/sequence/_exception.py                       |   29 -
 skbio/sequence/_genetic_code.py                    | 1184 ++--
 skbio/sequence/_iupac_sequence.py                  |  601 ++
 skbio/sequence/_nucleotide_mixin.py                |  363 ++
 skbio/sequence/_protein.py                         |  215 +
 skbio/sequence/_rna.py                             |  351 ++
 skbio/sequence/_sequence.py                        | 3311 ++++++-----
 skbio/sequence/tests/__init__.py                   |    4 +-
 skbio/sequence/tests/test_base.py                  |   48 +
 skbio/sequence/tests/test_dna.py                   |   45 +
 skbio/sequence/tests/test_genetic_code.py          |  834 +--
 skbio/sequence/tests/test_iupac_sequence.py        |  509 ++
 skbio/sequence/tests/test_nucleotide_sequences.py  |  474 ++
 skbio/sequence/tests/test_protein.py               |  124 +
 skbio/sequence/tests/test_sequence.py              | 3726 +++++++-----
 skbio/stats/__init__.py                            |   16 +-
 skbio/stats/__subsample.c                          | 1929 +++----
 skbio/stats/_misc.py                               |   50 -
 skbio/stats/_subsample.py                          |   62 +-
 skbio/stats/composition.py                         |  389 ++
 skbio/stats/distance/__init__.py                   |   24 +-
 skbio/stats/distance/_anosim.py                    |   75 +-
 skbio/stats/distance/_base.py                      |  379 +-
 skbio/stats/distance/_bioenv.py                    |   14 +-
 skbio/stats/distance/_mantel.py                    |   16 +-
 skbio/stats/distance/_permanova.py                 |   89 +-
 skbio/stats/distance/tests/__init__.py             |    4 +-
 skbio/stats/distance/tests/test_anosim.py          |   99 +-
 skbio/stats/distance/tests/test_base.py            |  119 +-
 skbio/stats/distance/tests/test_bioenv.py          |   25 +-
 skbio/stats/distance/tests/test_mantel.py          |   37 +-
 skbio/stats/distance/tests/test_permanova.py       |  101 +-
 skbio/stats/evolve/__init__.py                     |   41 +
 skbio/stats/evolve/_hommola.py                     |  268 +
 skbio/{draw => stats/evolve}/tests/__init__.py     |    4 +-
 skbio/stats/evolve/tests/test_hommola.py           |  188 +
 skbio/stats/gradient.py                            |   19 +-
 skbio/stats/ordination/__init__.py                 |    9 +-
 skbio/stats/ordination/_base.py                    |   82 +-
 .../_canonical_correspondence_analysis.py          |    3 +
 skbio/stats/ordination/_correspondence_analysis.py |    3 +
 .../ordination/_principal_coordinate_analysis.py   |    3 +
 skbio/stats/ordination/_redundancy_analysis.py     |    3 +
 skbio/stats/ordination/_utils.py                   |    7 +
 skbio/stats/ordination/tests/__init__.py           |    2 +
 skbio/stats/ordination/tests/test_ordination.py    |   35 +-
 skbio/stats/power.py                               |  792 ++-
 skbio/stats/spatial.py                             |    8 +-
 skbio/stats/tests/__init__.py                      |    4 +-
 skbio/stats/tests/test_composition.py              |  222 +
 skbio/stats/tests/test_gradient.py                 |   63 +-
 skbio/stats/tests/test_misc.py                     |   41 -
 skbio/stats/tests/test_power.py                    |  305 +-
 skbio/stats/tests/test_spatial.py                  |    5 +-
 skbio/stats/tests/test_subsample.py                |   20 +-
 skbio/{parse/sequences/_exception.py => test.py}   |   17 +-
 skbio/tests/__init__.py                            |    4 +-
 skbio/tests/test_workflow.py                       |    4 +-
 skbio/tree/__init__.py                             |   54 +-
 skbio/tree/_exception.py                           |    4 +-
 skbio/tree/_majority_rule.py                       |   33 +-
 skbio/tree/_nj.py                                  |    8 +-
 skbio/tree/_tree.py                                |  717 +--
 skbio/tree/_trie.py                                |   48 +-
 skbio/tree/tests/__init__.py                       |    2 +
 skbio/tree/tests/test_majority_rule.py             |    6 +-
 skbio/tree/tests/test_nj.py                        |    6 +-
 skbio/tree/tests/test_tree.py                      |  445 +-
 skbio/util/__init__.py                             |   17 +-
 skbio/util/_decorator.py                           |  338 ++
 skbio/util/_exception.py                           |    9 +-
 skbio/util/_misc.py                                |   96 +-
 skbio/util/_testing.py                             |  112 +
 skbio/util/_warning.py                             |    4 +-
 skbio/util/tests/__init__.py                       |    4 +-
 skbio/util/tests/test_decorator.py                 |  275 +
 skbio/util/tests/test_misc.py                      |  150 +-
 skbio/util/tests/test_testing.py                   |   84 +-
 skbio/workflow.py                                  |   41 +-
 454 files changed, 23828 insertions(+), 23698 deletions(-)

diff --git a/.coveragerc b/.coveragerc
index 84e1f79..438720d 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -11,10 +11,8 @@ include = */skbio/*
 [report]
 exclude_lines =
     pragma: no cover
-    def __repr__
     raise NotImplementedError
     if __name__ == .__main__.:
 omit =
     */tests*
     */__init__.py
-
diff --git a/.travis.yml b/.travis.yml
index a08278f..9ea3927 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,14 +2,10 @@
 # modified from https://gist.github.com/dan-blanchard/7045057
 language: python
 env:
-  # Test against latest versions of numpy and matplotlib. Also test against
-  # older versions of numpy and matplotlib (pre-1.4.0). matplotlib's boxplot
-  # functionality was largely rewritten/refactored in 1.4.0 and some of skbio's
-  # code had to be updated in the process, so it's worth testing against a
-  # pre-1.4.0 version and whatever the latest version is.
-  - PYTHON_VERSION=3.4 NUMPY_VERSION= MATPLOTLIB_VERSION=
-  - PYTHON_VERSION=2.7 NUMPY_VERSION= MATPLOTLIB_VERSION= WITH_DOCTEST=True USE_CYTHON=TRUE
-  - PYTHON_VERSION=2.7 NUMPY_VERSION='=1.7' MATPLOTLIB_VERSION='=1.3.1' WITH_DOCTEST=True
+  - PYTHON_VERSION=3.4
+  - PYTHON_VERSION=3.3
+  - PYTHON_VERSION=2.7 USE_CYTHON=TRUE
+  - PYTHON_VERSION=2.7
 before_install:
   - "export DISPLAY=:99.0"
   - "sh -e /etc/init.d/xvfb start"
@@ -20,19 +16,19 @@ before_install:
   # Update conda itself
   - conda update --yes conda
 install:
-  - conda create --yes -n env_name python=$PYTHON_VERSION pip numpy$NUMPY_VERSION scipy matplotlib$MATPLOTLIB_VERSION pandas nose pep8 Sphinx=1.2.2 IPython
+  - conda create --yes -n env_name python=$PYTHON_VERSION pip numpy scipy matplotlib pandas nose pep8 Sphinx=1.2.2 IPython
   - if [ ${USE_CYTHON} ]; then conda install --yes -n env_name cython; fi
   - source activate env_name
-  - pip install sphinx-bootstrap-theme future six coveralls natsort pyflakes flake8 python-dateutil
+  - pip install sphinx-bootstrap-theme HTTPretty future six bz2file contextlib2 coveralls natsort pyflakes flake8 python-dateutil  decorator 'CacheControl[FileCache]' git+git://github.com/numpy/numpydoc.git
   - pip install -e . --no-deps
 script:
-  - if [ ${WITH_DOCTEST} ]; then PYTHONWARNINGS=ignore nosetests skbio --with-doctest --with-coverage -I DONOTIGNOREANYTHING; else PYTHONWARNINGS=ignore nosetests skbio --with-coverage -I DONOTIGNOREANYTHING; fi
-  - pep8 skbio setup.py checklist.py
-  - flake8 skbio setup.py checklist.py
-  - ./checklist.py
-  - pushd doc
-  - make clean
-  - make html
-  - popd
+  - PYTHONWARNINGS=ignore WITH_COVERAGE=TRUE make test
+  - make -C doc clean html
 after_success:
   - coveralls
+notifications:
+  webhooks:
+    urls:
+      - https://webhooks.gitter.im/e/9a439713959e710c7971
+    on_success: change
+    on_failure: always
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f694b21..45cc1a0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,73 @@
 # scikit-bio changelog
 
+## Version 0.4.0 (2015-07-08)
+
+Initial beta release. In addition to the changes detailed below, the following
+subpackages have been mostly or entirely rewritten and most of their APIs are
+substantially different (and improved!):
+
+* `skbio.sequence`
+* `skbio.io`
+
+The APIs of these subpackages are now stable, and all others are experimental. See the [API stability docs](https://github.com/biocore/scikit-bio/tree/0.4.0/doc/source/user/api_stability.rst) for more details, including what we mean by *stable* and *experimental* in this context. We recognize that this is a lot of backward-incompatible changes. To avoid these types of changes being a surprise to our users, our public APIs are now decorated to make it clear to developers when an API can b [...]
+
+### Features
+* Added `skbio.stats.composition` for analyzing data made up of proportions
+* Added new ``skbio.stats.evolve`` subpackage for evolutionary statistics. Currently contains a single function, ``hommola_cospeciation``, which implements a permutation-based test of correlation between two distance matrices.
+* Added support for ``skbio.io.util.open_file`` and ``skbio.io.util.open_files`` to pull files from HTTP and HTTPS URLs. This behavior propagates to the I/O registry.
+* FASTA/QUAL (``skbio.io.format.fasta``) and FASTQ (``skbio.io.format.fastq``) readers now allow blank or whitespace-only lines at the beginning of the file, between records, or at the end of the file. A blank or whitespace-only line in any other location will continue to raise an error [#781](https://github.com/biocore/scikit-bio/issues/781).
+* scikit-bio now ignores leading and trailing whitespace characters on each line while reading FASTA/QUAL and FASTQ files.
+* Added `ratio` parameter to `skbio.stats.power.subsample_power`. This allows the user to calculate power on groups for uneven size (For example, draw twice as many samples from Group B as from Group A). If `ratio` is not set, group sizes will remain equal across all groups.
+* Power calculations (`skbio.stats.power.subsample_power` and `skbio.stats.power.subsample_paired_power`) can use test functions that return multiple p values, like some multivariate linear regression models. Previously, the power calculations required the test to return a single p value.
+* Added ``skbio.util.assert_data_frame_almost_equal`` function for comparing ``pd.DataFrame`` objects in unit tests.
+
+### Performance enhancements
+* The speed of quality score decoding has been significantly improved (~2x) when reading `fastq` files.
+* The speed of `NucleotideSequence.reverse_complement` has been improved (~6x).
+
+### Bug fixes
+* Changed `Sequence.distance` to raise an error any time two sequences are passed of different lengths regardless of the `distance_fn` being passed. [(#514)](https://github.com/biocore/scikit-bio/issues/514)
+* Fixed issue with ``TreeNode.extend`` where if given the children of another ``TreeNode`` object (``tree.children``), both trees would be left in an incorrect and unpredictable state. ([#889](https://github.com/biocore/scikit-bio/issues/889))
+* Changed the way power was calculated in `subsample_paired_power` to move the subsample selection before the test is performed. This increases the number of Monte Carlo simulations performed during power estimation, and improves the accuracy of the returned estimate. Previous power estimates from `subsample_paired_power` should be disregarded and re-calculated. ([#910](https://github.com/biocore/scikit-bio/issues/910))
+* Fixed issue where `randdm` was attempting to create asymmetric distance matrices. This was causing an error to be raised by the `DistanceMatrix` constructor inside of the `randdm` function, so that `randdm` would fail when attempting to create large distance matrices. ([#943](https://github.com/biocore/scikit-bio/issues/943))
+
+### Deprecated functionality
+* Deprecated `skbio.util.flatten`. This function will be removed in scikit-bio 0.4.1. Please use standard Python library functionality
+described here [Making a flat list out of lists of lists](http://stackoverflow.com/a/952952/3639023), [Flattening a shallow list](http://stackoverflow.com/a/406199/3639023) ([#833](https://github.com/biocore/scikit-bio/issues/833))
+* Deprecated `skbio.stats.power.bootstrap_power_curve`, which will be removed in scikit-bio 0.4.1. It is deprecated in favor of using ``subsample_power`` or ``sample_paired_power`` to calculate a power matrix, and then the use of ``confidence_bounds`` to calculate the average and confidence intervals.
+
+### Backward-incompatible changes
+* Removed the following deprecated functionality:
+    - `skbio.parse` subpackage, including `SequenceIterator`, `FastaIterator`, `FastqIterator`, `load`, `parse_fasta`, `parse_fastq`, `parse_qual`, `write_clustal`, `parse_clustal`, and `FastqParseError`; please use `skbio.io` instead.
+    - `skbio.format` subpackage, including `fasta_from_sequence`, `fasta_from_alignment`, and `format_fastq_record`; please use `skbio.io` instead.
+    - `skbio.alignment.SequenceCollection.int_map`; please use `SequenceCollection.update_ids` instead.
+    - `skbio.alignment.SequenceCollection` methods `to_fasta` and `toFasta`; please use `SequenceCollection.write` instead.
+    - `constructor` parameter in `skbio.alignment.Alignment.majority_consensus`; please convert returned biological sequence object manually as desired (e.g., `str(seq)`).
+    - `skbio.alignment.Alignment.to_phylip`; please use `Alignment.write` instead.
+    - `skbio.sequence.BiologicalSequence.to_fasta`; please use `BiologicalSequence.write` instead.
+    - `skbio.tree.TreeNode` methods `from_newick`, `from_file`, and `to_newick`; please use `TreeNode.read` and `TreeNode.write` instead.
+    - `skbio.stats.distance.DissimilarityMatrix` methods `from_file` and `to_file`; please use `DissimilarityMatrix.read` and `DissimilarityMatrix.write` instead.
+    - `skbio.stats.ordination.OrdinationResults` methods `from_file` and `to_file`; please use `OrdinationResults.read` and `OrdinationResults.write` instead.
+    - `skbio.stats.p_value_to_str`; there is no replacement.
+    - `skbio.stats.subsample`; please use `skbio.stats.subsample_counts` instead.
+    - `skbio.stats.distance.ANOSIM`; please use `skbio.stats.distance.anosim` instead.
+    - `skbio.stats.distance.PERMANOVA`; please use `skbio.stats.distance.permanova` instead.
+    - `skbio.stats.distance.CategoricalStatsResults`; there is no replacement, please use `skbio.stats.distance.anosim` or `skbio.stats.distance.permanova`, which will return a `pandas.Series` object.
+* `skbio.alignment.Alignment.majority_consensus` now returns `BiologicalSequence('')` if the alignment is empty. Previously, `''` was returned.
+* `min_observations` was removed from `skbio.stats.power.subsample_power` and `skbio.stats.power.subsample_paired_power`. The minimum number of samples for subsampling depends on the data set and statistical tests. Having a default parameter imposed unnecessary limitations on the technique.
+
+### Miscellaneous
+* Changed testing procedures
+    - Developers should now use `make test`
+    - Users can use `python -m skbio.test`
+    - Added `skbio.util._testing.TestRunner` (available through `skbio.util.TestRunner`). Used to provide a `test` method for each module init file. This class represents a unified testing path which wraps all `skbio` testing functionality.
+    - Autodetect Python version and disable doctests for Python 3.
+* `numpy` is no longer required to be installed before installing scikit-bio!
+* Upgraded checklist.py to check source files non-conforming to [new header style](http://scikit-bio.org/docs/latest/development/new_module.html). ([#855](https://github.com/biocore/scikit-bio/issues/855))
+* Updated to use `natsort` >= 4.0.0.
+* The method of subsampling was changed for ``skbio.stats.power.subsample_paired_power``. Rather than drawing a paired sample for the run and then subsampling for each count, the subsample is now drawn for each sample and each run. In test data, this did not significantly alter the power results.
+* checklist.py now enforces `__future__` imports in .py files.
+
 ## Version 0.2.3 (2015-02-13)
 
 ### Features
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 11008cf..6da2ccf 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,139 +1,152 @@
 Contributing to scikit-bio
 ==========================
 
-[scikit-bio](http://www.scikit-bio.org) is an open source software package, and we welcome community contributions. You can find the source code and test code for scikit-bio under public revision control in the scikit-bio git repository on [GitHub](https://github.com/biocore/scikit-bio). We very much welcome contributions.
+[scikit-bio](http://scikit-bio.org) is an open source software package and we welcome community contributions. You can find the scikit-bio source code on GitHub [here](https://github.com/biocore/scikit-bio).
 
-This document covers what you should do to get started with contributing to scikit-bio. You should read this whole document before considering submitting code to scikit-bio. This will save time for both you and the scikit-bio developers.
+This document covers what you should do to get started with contributing to scikit-bio. You should read the entire document before contributing code to scikit-bio. This will save time for both you and the scikit-bio developers.
 
-Type of Submissions
--------------------
+Types of contributions
+----------------------
 
-Some of the types of contributions we're interested in are new features (big or small, but for big ones it's generally a good idea to ask us if we're interested in including it before starting development), bug fixes, and documentation updates, additions, and fixes.
+We're interested in many different types of contributions, including feature additions, bug fixes, continuous integration improvements, and documentation/website updates, additions, and fixes.
 
-When considering submitting a new feature to scikit-bio, you should begin by posting an issue to the [scikit-bio issue tracker](https://github.com/biocore/scikit-bio/issues). The information that you include in that post will differ based on the type of contribution. Your contribution will also need to be fully tested (discussed further below).
+When considering contributing to scikit-bio, you should begin by posting an issue to the [scikit-bio issue tracker](https://github.com/biocore/scikit-bio/issues). The information that you include in that post will differ based on the type of contribution. Your contribution will also need to be fully tested where applicable (discussed further below).
 
-* For new features, you'll want to describe why the functionality that you are proposing to add is relevant. For it to be relevant, it should be demonstrably useful to scikit-bio users. This typically means that a new analytic method is implemented (you should describe why it's useful, ideally including a link to a paper that uses this method), or an existing method is enhanced (your implementation matches the performance of the pre-existing method while reducing runtime, memory consumpt [...]
+* For feature additions, please describe why the functionality that you are proposing to add is relevant. For it to be relevant, it should be demonstrably useful to scikit-bio users and it should also fit within the biology/bioinformatics domain. This typically means that a new analytic method is implemented (you should describe why it's useful, ideally including a link to a paper that uses this method), or an existing method is enhanced (e.g., improved performance). We will request benc [...]
 
-* For bug fixes, you should provide a detailed description of the bug so other developers can reproduce it. We take bugs in scikit-bio very seriously. Bugs can be related to errors in code, documentation, or tests. Errors in documentation or tests are usually updated in the next major release of scikit-bio. Errors in code that could result in incorrect results or inability to access certain functionality may result in a new minor release of scikit-bio.
+* For bug fixes, please provide a detailed description of the bug so other developers can reproduce it. We take bugs in scikit-bio very seriously. Bugs can be related to errors in code, documentation, or tests. Errors in documentation or tests are usually updated in the next scheduled release of scikit-bio. Errors in code that could result in incorrect results or inability to access certain functionality may result in a bug fix release of scikit-bio that is released ahead of schedule.
 
  You should include the following information in your bug report:
 
- 1. The exact command or function call that you issue to create the bug.
- 2. A link to all necessary input files for reproducing the bug. These files should only be as large as necessary to create the bug. For example, if you have an input file with 10,000 fasta-formatted sequences but the error only arises due to one of the sequences, create a new fasta file with only that sequence, run the command that was giving you problems, and verify that you still get an error. Then post that command and link to the trimmed fasta file. This is *extremely* useful to oth [...]
+ 1. The exact command(s) necessary to reproduce the bug.
+ 2. A link to all necessary input files for reproducing the bug. These files should only be as large as necessary to create the bug. For example, if you have an input file with 10,000 FASTA-formatted sequences but the error only arises due to one of the sequences, create a new FASTA file with only that sequence, run the command that was giving you problems, and verify that you still get an error. Then post that command and link to the trimmed FASTA file. This is *extremely* useful to oth [...]
 
-* For documentation additions, you should first post an issue describing what you propose to add, where you'd like to add it in the documentation, and a description of why you think it's an important addition. For documentation improvements and fixes, you should post an issue describing what is currently wrong or missing, and how you propose to address it. For more information about building and contributing to scikit-bio's documentation, see [this guide](doc/README.md).
+* For documentation additions, you should first post an issue describing what you propose to add, where you'd like to add it in the documentation, and a description of why you think it's an important addition. For documentation improvements and fixes, you should post an issue describing what is currently wrong or missing and how you propose to address it. For more information about building and contributing to scikit-bio's documentation, see our [documentation guide](doc/README.md).
 
-When you post your issue, the scikit-bio developers will respond to let you know if we agree with the addition or change. It's very important that you go through this step to avoid wasting time working on a feature that we are not interested in including in scikit-bio.
+When you post your issue, the scikit-bio developers will respond to let you know if we agree with the addition or change. It's very important that you go through this step to avoid wasting time working on a feature that we are not interested in including in scikit-bio. **This initial discussion with the developers is particularly important prior to our beta (0.4.0) release, as scikit-bio is rapidly changing. This includes complete re-writes of some of the core objects, so if you don't ge [...]
 
+Getting started
+---------------
 
-Getting started: "quick fixes"
-------------------------------
+### "quick fixes"
 
-Some of our issues are labeled as ``quick fix``. Working on [these issues](https://github.com/biocore/scikit-bio/issues?direction=desc&labels=quick+fix&milestone=&page=1&sort=updated&state=open) is a good way to get started with contributing to scikit-bio. These are usually small bugs or documentation errors that will only require one or a few lines of code to fix. Getting started by working on one of these issues will allow you to familiarize yourself with our development process before [...]
+Some of our issues are labeled as ``quick fix``. Working on [these issues](https://github.com/biocore/scikit-bio/issues?q=is%3Aopen+is%3Aissue+label%3A%22quick+fix%22) is a good way to get started with contributing to scikit-bio. These are usually small bugs or documentation errors that will only require one or a few lines of code to fix. Getting started by working on one of these issues will allow you to familiarize yourself with our development process before committing to a large amou [...]
 
+### Joining development
 
-Code Review
+Once you are more comfortable with our development process, you can check out the [``on deck`` label](https://github.com/biocore/scikit-bio/labels/on%20deck) on our issue tracker. These issues represent what our current focus is in the project. As such, they are probably the best place to start if you are looking to join the conversation and contribute code.
+
+Code review
 -----------
 
 When you submit code to scikit-bio, it will be reviewed by one or more scikit-bio developers. These reviews are intended to confirm a few points:
 
-* Your code is sufficiently well-tested (see Testing Guidelines below).
-* Your code adheres to our Coding Guidelines (see Coding Guidelines below).
-* Your code is sufficiently well-documented (see Coding Guidelines below).
-* Your code provides relevant changes or additions to scikit-bio (Type of Submissions above).
-
-This process is designed to ensure the quality of scikit-bio, and can be a very useful experience for new developers.
+* Your code provides relevant changes or additions to scikit-bio ([Types of contributions](#types-of-contributions)).
+* Your code adheres to our coding guidelines ([Coding guidelines](#coding-guidelines)).
+* Your code is sufficiently well-tested ([Testing guidelines](#testing-guidelines)).
+* Your code is sufficiently well-documented ([Documentation guidelines](#documentation-guidelines)).
 
-Particularly for big changes, if you'd like feedback on your code in the form of a code review as you work, you should request help in the issue that you created and one of the scikit-bio developers will work with you to perform regular code reviews. This can greatly reduce development time (and frustration) so we highly recommend that new developers take advantage of this rather than submitting a pull request with a massive amount of code in one chunk. That can lead to frustration when  [...]
+This process is designed to ensure the quality of scikit-bio and can be a very useful experience for new developers.
 
+Particularly for big changes, if you'd like feedback on your code in the form of a code review as you work, you should request help in the issue that you created and one of the scikit-bio developers will work with you to perform regular code reviews. This can greatly reduce development time (and frustration) so we highly recommend that new developers take advantage of this rather than submitting a pull request with a massive amount of code. That can lead to frustration when the developer [...]
 
 Submitting code to scikit-bio
 -----------------------------
 
-scikit-bio is hosted on [GitHub](http://www.github.com), and we use GitHub's [Pull Request](https://help.github.com/articles/using-pull-requests) mechanism for accepting submissions. You should go through the following steps to submit code to scikit-bio.
+scikit-bio is hosted on [GitHub](http://www.github.com), and we use GitHub's [Pull Request](https://help.github.com/articles/using-pull-requests) mechanism for reviewing and accepting submissions. You should go through the following steps to submit code to scikit-bio.
 
-1. Begin by [creating an issue](https://github.com/biocore/scikit-bio/issues) describing your proposed change. This should include a description of your proposed change (is it a new feature, a bug fix, etc.), and note in the issue description that you want to work on it. Once you hear back from a maintainer that it is OK to make changes (i.e., they dont't have local edits, they agree with the change you'd like to make, and they're comfortable with you editing their code), we will assign  [...]
+1. Begin by [creating an issue](https://github.com/biocore/scikit-bio/issues) describing your proposed change (see [Types of contributions](#types-of-contributions) for details).
 
-2. [Fork](https://help.github.com/articles/fork-a-repo) the scikit-bio repository on the GitHub website to your GitHub account.
+2. [Fork](https://help.github.com/articles/fork-a-repo) the scikit-bio repository on the GitHub website.
 
-3. Clone your forked repository to the system where you'll be developing with ``git clone``.
+3. Clone your forked repository to the system where you'll be developing with ``git clone``. ``cd`` into the ``scikit-bio`` directory that was created by ``git clone``.
 
-4. Ensure that you have the latest version of all files (especially important if you cloned a long time ago, but you'll need to do this before submitting changes regardless). You should do this by adding scikit-bio as a remote repository and then pulling from that repository. You'll only need to run the ``git remote`` step one time:
-```
-git checkout master
-git remote add upstream https://github.com/biocore/scikit-bio.git
-git pull upstream master
-```
+4. Ensure that you have the latest version of all files. This is especially important if you cloned a long time ago, but you'll need to do this before submitting changes regardless. You should do this by adding scikit-bio as a remote repository and then pulling from that repository. You'll only need to run the ``git remote`` command the first time you do this:
 
-5. Create a new topic branch that you will make your changes in with ``git checkout -b``:
-```
-git checkout -b my-topic-branch
-```
+ ```
+ git remote add upstream https://github.com/biocore/scikit-bio.git
+ git checkout master
+ git pull upstream master
+ ```
 
-6. Run ``nosetests --with-doctest ; pep8 skbio setup.py`` to confirm that the tests pass before you make any changes.
+5. Install scikit-bio in "development mode" so that your changes are reflected in the installed package without having to reinstall the package each time:
 
-7. Make your changes, add them (with ``git add``), and commit them (with ``git commit``). Don't forget to update associated scripts and tests as necessary. You should make incremental commits, rather than one massive commit at the end. Write descriptive commit messages to accompany each commit.
+ ```
+ pip install -e .
+ ```
 
-8. When you think you're ready to submit your code, again ensure that you have the latest version of all files in case some changed while you were working on your edits. You can do this by merging master into your topic branch:
-```
-git checkout my-topic-branch
-git pull upstream master
-```
+6. Create a new topic branch that you will make your changes in with ``git checkout -b``:
 
-9. Run ``nosetests --with-doctest ; pep8 skbio setup.py`` to ensure that your changes did not cause anything expected to break.
+ ```
+ git checkout -b my-topic-branch
+ ```
 
-10. Once the tests pass, you should push your changes to your forked repository on GitHub using:
-```
-git push origin my-topic-branch
-```
+ What you name your topic branch is up to you, though we recommend including the issue number in the topic branch, since there is usually already an issue associated with the changes being made in the pull request. For example, if you were addressing issue number 42, you might name your topic branch ``issue-42``.
 
-11. Issue a [pull request](https://help.github.com/articles/using-pull-requests) on the GitHub website to request that we merge your branch's changes into scikit-bio's master branch. One of the scikit-bio developers will review your code at this stage. If we request changes (which is very common), *don't issue a new pull request*. You should make changes on your topic branch, and commit and push them to GitHub. Your pull request will update automatically.
+7. Run ``make test`` to confirm that the tests pass before you make any changes.
 
+8. Make your changes, add them (with ``git add``), and commit them (with ``git commit``). Don't forget to update associated tests and documentation as necessary. Write descriptive commit messages to accompany each commit. We recommend following [NumPy's commit message guidelines](http://docs.scipy.org/doc/numpy/dev/gitwash/development_workflow.html#writing-the-commit-message), including the usage of commit tags (i.e., starting commit messages with acronyms such as ``ENH``, ``BUG``, etc.).
 
-Coding Guidelines
------------------
+9. Please mention your changes in [CHANGELOG.md](CHANGELOG.md). This file informs scikit-bio *users* of changes made in each release, so be sure to describe your changes with this audience in mind. It is especially important to note API additions and changes, particularly if they are backward-incompatible, as well as bug fixes. Be sure to make your updates under the section designated for the latest development version of scikit-bio (this will be at the top of the file). Describe your ch [...]
+
+10. When you're ready to submit your code, ensure that you have the latest version of all files in case some changed while you were working on your edits. You can do this by merging master into your topic branch:
 
-We adhere to the [PEP 8](http://www.python.org/dev/peps/pep-0008/) python coding guidelines for code and documentation standards. Before submitting any code to scikit-bio, you should read these carefully and apply the guidelines in your code.
+ ```
+ git checkout master
+ git pull upstream master
+ git checkout my-topic-branch
+ git merge master
+ ```
 
+11. Run ``make test`` to ensure that your changes did not cause anything expected to break.
 
-Testing Guidelines
+12. Once the tests pass, you should push your changes to your forked repository on GitHub using:
+
+ ```
+ git push origin my-topic-branch
+ ```
+
+13. Issue a [pull request](https://help.github.com/articles/using-pull-requests) on the GitHub website to request that we merge your branch's changes into scikit-bio's master branch. Be sure to include a description of your changes in the pull request, as well as any other information that will help the scikit-bio developers involved in reviewing your code. Please include ``fixes #<issue-number>`` in your pull request description or in one of your commit messages so that the correspondin [...]
+
+Coding guidelines
+-----------------
+
+We adhere to the [PEP 8](http://www.python.org/dev/peps/pep-0008/) Python style guidelines. Please see scikit-bio's [coding guidelines](http://scikit-bio.org/docs/latest/development/coding_guidelines.html) for more details. Before submitting code to scikit-bio, you should read this document carefully and apply the guidelines in your code.
+
+Testing guidelines
 ------------------
 
-All code that is added to scikit-bio must be unit tested, and the unit test code must be submitted in the same pull request as the library code that you are submitting. We will only merge code that is unit tested and that passes the [continuous integration build](https://github.com/biocore/scikit-bio/blob/master/.travis.yml), this build verifies that the:
+All code that is added to scikit-bio must be unit tested, and the unit test code must be submitted in the same pull request as the library code that you are submitting. We will only merge code that is unit tested and that passes the [continuous integration build](https://github.com/biocore/scikit-bio/blob/master/.travis.yml). This build includes, but is not limited to, the following checks:
 
-- Full test suite executes without errors.
-- Doctests execute correctly (currently only for Python 2.7).
+- Full unit test suite executes without errors in Python 2 and 3.
+- Doctests execute correctly (currently only for Python 2).
 - C code can be correctly compiled.
 - Cython code is correctly generated.
-- All code is valid in Python 2.7 and >=3.3.
 - All tests import functionality from the appropriate minimally deep API.
+- Documentation can be built.
+- Current code coverage is maintained or improved.
+- Code passes ``pep8``/``flake8`` checks.
+
+Running ``make test`` locally during development will include a subset of the full checks performed by Travis-CI.
 
 The scikit-bio coding guidelines describe our [expectations for unit tests](http://scikit-bio.org/development/coding_guidelines.html). You should review the unit test section before working on your test code.
 
-Tests can be executed using [nose](https://nose.readthedocs.org/en/latest/) by running `nosetests --with-doctest` from the base directory of the project or from within a Python or IPython session running the following code:
+Tests can be executed by running ``make test`` from the base directory of the project or from within a Python or IPython session:
 
 ``` python
 >>> import skbio
 >>> skbio.test()
 # full test suite is executed
->>> skbio.parse.test()
-# tests for the parse module are executed
+>>> skbio.io.test()
+# tests for the io module are executed
 ```
 
-Note that this is possible because the lines below are added at the end of each `__init__.py` file in the package, so if you add a new module, be sure to include these lines in its `__init__.py`:
-
-```python
-from numpy.testing import Tester
-test = Tester().test
-```
-
-
-Documentation Guidelines
+Documentation guidelines
 ------------------------
 
-We strive to keep scikit-bio's code well-documented, particularly its public-facing API. See our [documentation guide](doc/README.md) for more details on writing documentation in scikit-bio.
+We strive to keep scikit-bio well-documented, particularly its public-facing API. See our [documentation guide](doc/README.md) for more details.
 
 Getting help with git
-=====================
+---------------------
 
 If you're new to ``git``, you'll probably find [gitref.org](http://gitref.org/) helpful.
diff --git a/MANIFEST.in b/MANIFEST.in
index f986969..93ee436 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,6 +1,7 @@
 include CHANGELOG.md
-include COPYING.txt
 include CONTRIBUTING.md
+include COPYING.txt
+include Makefile
 include README.rst
 include RELEASE.md
 include checklist.py
diff --git a/skbio/format/__init__.py b/Makefile
similarity index 60%
rename from skbio/format/__init__.py
rename to Makefile
index 18cec47..d56ce75 100644
--- a/skbio/format/__init__.py
+++ b/Makefile
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -8,5 +6,14 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from numpy.testing import Tester
-test = Tester().test
+ifeq ($(WITH_COVERAGE), TRUE)
+	TEST_COMMAND = coverage run -m skbio.test
+else
+	TEST_COMMAND = python -m skbio.test
+endif
+
+test:
+	$(TEST_COMMAND)
+	pep8 skbio setup.py checklist.py
+	flake8 skbio setup.py checklist.py
+	./checklist.py
diff --git a/README.rst b/README.rst
index 443b1a0..b1633f0 100644
--- a/README.rst
+++ b/README.rst
@@ -1,127 +1,70 @@
-::
-
-               _ _    _ _          _     _
-              (_) |  (_) |        | |   (_)
-      ___  ___ _| | ___| |_ ______| |__  _  ___
-     / __|/ __| | |/ / | __|______| '_ \| |/ _ \
-     \__ \ (__| |   <| | |_       | |_) | | (_) |
-     |___/\___|_|_|\_\_|\__|      |_.__/|_|\___/
-
-
-           Opisthokonta
-                   \  Amoebozoa
-                    \ /
-                     *    Euryarchaeota
-                      \     |_ Crenarchaeota
-                       \   *
-                        \ /
-                         *
-                        /
-                       /
-                      /
-                     *
-                    / \
-                   /   \
-        Proteobacteria  \
-                       Cyanobacteria
-
-|Build Status| |Coverage Status|
-
-scikit-bio is an open-source, BSD-licensed python package providing data structures, algorithms and educational resources for bioinformatics.
 
-To view scikit-bio's documentation, visit `scikit-bio.org
-<http://scikit-bio.org>`__.
-
-scikit-bio is currently in alpha. We are very actively developing it, and **backwards-incompatible interface changes can and will arise**. Once the API has started to solidify, we will strive to maintain backwards compatibility. We will provide deprecation warnings wherever possible in the scikit-bio code, documentation, and CHANGELOG.md.
-
-**Note:** Deprecation warnings will be issued using Python's ``DeprecationWarning`` class. Since Python 2.7, these types of warnings are **silenced by default**. When developing a tool that uses scikit-bio, we recommend enabling the display of deprecation warnings to be informed of upcoming API changes. For details on how to display deprecation warnings, see `Python's deprecation warning docs <https://docs.python.org/3/whatsnew/2.7.html#changes-to-the-handling-of-deprecation-warnings>`_.
-
-Installation of release version (recommended for most users)
-------------------------------------------------------------
-
-To install the latest release version of scikit-bio you should run::
-
-    pip install numpy
-    pip install scikit-bio
+.. image:: http://scikit-bio.org/assets/logo.svg
+   :target: http://scikit-bio.org
+   :alt: scikit-bio logo
 
-Equivalently, you can use the ``conda`` package manager available in `Anaconda <http://continuum.io/downloads>`_ or `miniconda <http://conda.pydata.org/miniconda.html>`_ to install scikit-bio and all its dependencies, without having to compile them::
+|Build Status| |Coverage Status| |Gitter Badge|
 
-     conda install scikit-bio
+scikit-bio is an open-source, BSD-licensed Python package providing data structures, algorithms and educational resources for bioinformatics.
 
-Finally, most scikit-bio's dependencies (in particular, the ones that are trickier to build) are also available, albeit only for Python 2, in `Canopy Express <https://www.enthought.com/canopy-express/>`_.
-
-You can verify your installation by running the scikit-bio unit tests as follows::
-
-    nosetests --with-doctest skbio
-
-Installation of development version
------------------------------------
-
-If you're interested in working with the latest development release of scikit-bio (recommended for developers only, as the development code can be unstable and less documented than the release code), you can clone the repository and install as follows. This will require that you have ``git`` installed.
-::
+To view scikit-bio's documentation, visit `scikit-bio.org
+<http://scikit-bio.org>`__.
 
-    git clone git at github.com:biocore/scikit-bio.git
-    cd scikit-bio
-    pip install .
+scikit-bio is currently in beta. We are very actively developing it, and **backward-incompatible interface changes can and will arise**. To avoid these types of changes being a surprise to our users, our public APIs are decorated to make it clear to users when an API can be relied upon (stable) and when it may be subject to change (experimental). See the `API stability docs <https://github.com/biocore/scikit-bio/tree/0.4.0/doc/source/user/api_stability.rst>`_ for more details, including  [...]
 
-After this completes, you can run the scikit-bio unit tests as follows. You must first ``cd`` out of the ``scikit-bio`` directory for the tests to pass (here we ``cd`` to the home directory).
-::
+Installing
+----------
 
-    cd
-    nosetests --with-doctest skbio
+To install the latest release of scikit-bio::
 
-For developers of scikit-bio, if you don't want to be forced to re-install after every change, you can modify the above ``pip install`` command to::
+    pip install scikit-bio
 
-    pip install -e .
+Equivalently, you can use the ``conda`` package manager available in `Anaconda <http://continuum.io/downloads>`_ or `miniconda <http://conda.pydata.org/miniconda.html>`_ to install scikit-bio and its dependencies without having to compile them::
 
-This will build scikit-bio's Cython extensions, and will create a link in the ``site-packages`` directory to the scikit-bio source directory. When you then make changes to code in the source directory, those will be used (e.g., by the unit tests) without re-installing.
+    conda install scikit-bio
 
-Finally, if you don't want to use ``pip`` to install scikit-bio, and prefer to just put ``scikit-bio`` in your ``$PYTHONPATH``, at the minimum you should run::
+Finally, most of scikit-bio's dependencies (in particular, the ones that are trickier to build) are also available, albeit only for Python 2, in `Canopy Express <https://www.enthought.com/canopy-express/>`_.
 
-    python setup.py build_ext --inplace
+You can verify your installation by running the scikit-bio unit tests::
 
-This will build scikit-bio's Cython extensions, but not create a link to the scikit-bio source directory in ``site-packages``. If this isn't done, using certain components of scikit-bio will be inefficient and will produce an ``EfficiencyWarning``.
+    python -m skbio.test
 
 Getting help
 ------------
 
-To get help with scikit-bio, you should use the `skbio <http://stackoverflow.com/questions/tagged/skbio>`_ tag on StackOverflow (SO). Before posting a question, check out SO's guide on how to `ask a question <http://stackoverflow.com/questions/how-to-ask>`_. The scikit-bio developers regularly monitor the skbio SO tag.
-
-Licensing
----------
-
-scikit-bio is available under the new BSD license. See
-`COPYING.txt <https://github.com/biocore/scikit-bio/blob/master/COPYING.txt>`__ for scikit-bio's license, and the
-`licenses directory <https://github.com/biocore/scikit-bio/tree/master/licenses>`_ for the licenses of third-party software that is
-(either partially or entirely) distributed with scikit-bio.
+To get help with scikit-bio, you should use the `skbio <http://stackoverflow.com/questions/tagged/skbio>`_ tag on StackOverflow (SO). Before posting a question, check out SO's guide on how to `ask a question <http://stackoverflow.com/questions/how-to-ask>`_. The scikit-bio developers regularly monitor the ``skbio`` SO tag.
 
 Projects using scikit-bio
 -------------------------
 
 Some of the projects that we know of that are using scikit-bio are:
 
--  `QIIME <http://qiime.org/>`__
--  `Emperor <http://biocore.github.io/emperor/>`__
--  `An Introduction to Applied
-   Bioinformatics <http://caporasolab.us/An-Introduction-To-Applied-Bioinformatics/>`__
--  `tax2tree <https://github.com/biocore/tax2tree>`__
+- `QIIME <http://qiime.org/>`__
+- `Emperor <http://biocore.github.io/emperor/>`__
+- `An Introduction to Applied
+  Bioinformatics <http://readIAB.org>`__
+- `tax2tree <https://github.com/biocore/tax2tree>`__
+- `Qiita <http://qiita.microbio.me>`__
+- `ghost-tree <https://github.com/JTFouquier/ghost-tree>`__
+- `Platypus-Conquistador <https://github.com/biocore/Platypus-Conquistador>`__
 
-If you're using scikit-bio in your own projects, you can issue a
-pull request to add them to this list.
+If you're using scikit-bio in your own projects, feel free to issue a pull request to add them to this list.
 
 scikit-bio development
 ----------------------
 
-If you're interested in getting involved in or learning about
-scikit-bio development, see `CONTRIBUTING.md <https://github.com/biocore/scikit-bio/blob/master/CONTRIBUTING.md>`__.
+If you're interested in getting involved in scikit-bio development, see `CONTRIBUTING.md <https://github.com/biocore/scikit-bio/blob/master/CONTRIBUTING.md>`__.
 
-See the `list of all of scikit-bio's contributors
+See the list of `scikit-bio's contributors
 <https://github.com/biocore/scikit-bio/graphs/contributors>`__.
 
-Summaries of our weekly developer meetings are posted on
-HackPad. Click `here
-<https://hackpad.com/2014-scikit-bio-developer-meeting-notes-1S2RbMqy0iM>`__
-to view the meeting notes for 2014.
+Licensing
+---------
+
+scikit-bio is available under the new BSD license. See
+`COPYING.txt <https://github.com/biocore/scikit-bio/blob/master/COPYING.txt>`__ for scikit-bio's license, and the
+`licenses directory <https://github.com/biocore/scikit-bio/tree/master/licenses>`_ for the licenses of third-party software that is
+(either partially or entirely) distributed with scikit-bio.
 
 The pre-history of scikit-bio
 -----------------------------
@@ -137,7 +80,7 @@ been ported to scikit-bio are: Rob Knight (`@rob-knight
 <https://github.com/wasade>`__), Micah Hamady, Antonio Gonzalez
 (`@antgonza <https://github.com/antgonza>`__), Sandra Smit, Greg
 Caporaso (`@gregcaporaso <https://github.com/gregcaporaso>`__), Jai
-Ram Rideout (`@ElBrogrammer <https://github.com/ElBrogrammer>`__),
+Ram Rideout (`@jairideout <https://github.com/jairideout>`__),
 Cathy Lozupone (`@clozupone <https://github.com/clozupone>`__), Mike Robeson
 (`@mikerobeson <https://github.com/mikerobeson>`__), Marcin Cieslik,
 Peter Maxwell, Jeremy Widmann, Zongzhi Liu, Michael Dwan, Logan Knecht
@@ -154,11 +97,11 @@ Logo
 ----
 
 scikit-bio's logo was created by `Alina Prassas <http://cargocollective.com/alinaprassas>`_.
-scikit-bio's ASCII art tree was created by `@gregcaporaso
-<https://github.com/gregcaporaso>`_. Our text logo was created at `patorjk.com
-<http://patorjk.com/software/taag/>`__.
 
 .. |Build Status| image:: https://travis-ci.org/biocore/scikit-bio.svg?branch=master
    :target: https://travis-ci.org/biocore/scikit-bio
 .. |Coverage Status| image:: https://coveralls.io/repos/biocore/scikit-bio/badge.png
    :target: https://coveralls.io/r/biocore/scikit-bio
+.. |Gitter Badge| image:: https://badges.gitter.im/Join%20Chat.svg
+   :alt: Join the chat at https://gitter.im/biocore/scikit-bio
+   :target: https://gitter.im/biocore/scikit-bio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
diff --git a/RELEASE.md b/RELEASE.md
index 20de01a..ef4af61 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -14,7 +14,7 @@ To illustrate examples of commands you might run, let's assume that the current
 
 1. Ensure the Travis build is passing against master.
 
-2. Update the version strings (1.2.3-dev) to the new version (1.2.4). There should only be two places this needs to be done: ``setup.py`` and ``skbio/__init__.py``. It's a good idea to ``grep`` for the current version string just to be safe:
+2. Update the version strings (1.2.3-dev) to the new version (1.2.4). There should only be one place this needs to be done: ``skbio/__init__.py``. It's a good idea to ``grep`` for the current version string just to be safe:
 
         grep -ir '1\.2\.3-dev' *
 
@@ -70,10 +70,9 @@ Once the release is created on GitHub, it's a good idea to test out the release
 
 3. Install the release and run the tests:
 
-        pip install numpy
         pip install .
         cd
-        nosetests --with-doctest skbio
+        python -m skbio.test
 
 4. During this process (it can take awhile to install all of scikit-bio's dependencies), submit a pull request to update the version strings from 1.2.4 to 1.2.4-dev. Use the same strategy described above to update the version strings. Update ``CHANGELOG.md`` to include a new section for 1.2.4-dev (there won't be any changes to note here yet). **Do not merge this pull request yet.**
 
@@ -92,9 +91,8 @@ Assuming the GitHub release tarball correctly installs and passes its tests, you
 3. Create a new virtualenv and run:
 
         cd
-        pip install numpy
         pip install <path to extracted scikit-bio release>/dist/scikit-bio-1.2.4.tar.gz
-        nosetests --with-doctest skbio
+        python -m skbio.test
 
 4. If everything goes well, it is finally time to push the release to PyPI:
 
@@ -105,9 +103,8 @@ Assuming the GitHub release tarball correctly installs and passes its tests, you
 5. Once the release is available on PyPI, do a final round of testing. Create a new virtualenv and run:
 
         cd
-        pip install numpy
         pip install scikit-bio
-        nosetests --with-doctest skbio
+        python -m skbio.test
 
 If this succeeds, the release appears to be a success!
 
diff --git a/assets/logo-inverted.svg b/assets/logo-inverted.svg
new file mode 100644
index 0000000..45e9d42
--- /dev/null
+++ b/assets/logo-inverted.svg
@@ -0,0 +1,76 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   version="1.1"
+   width="900"
+   height="250"
+   id="svg2"
+   xml:space="preserve"><metadata
+     id="metadata8"><rdf:RDF><cc:Work
+         rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
+     id="defs6"><clipPath
+       id="clipPath16"><path
+         d="M 0,200 720,200 720,0 0,0 0,200 z"
+         id="path18" /></clipPath></defs><g
+     transform="matrix(1.25,0,0,-1.25,0,250)"
+     id="g10"><g
+       id="g12"><g
+         clip-path="url(#clipPath16)"
+         id="g14"><g
+           transform="translate(63.7012,122.0288)"
+           id="g20"><path
+             d="m 0,0 -40.188,0 0,-14.848 36.756,0 c 5.368,0 9.031,-0.935 10.988,-2.804 1.958,-1.87 2.936,-5.445 2.936,-10.724 l 0,-6.863 c 0,-5.279 -0.957,-8.853 -2.87,-10.723 -1.914,-1.87 -5.599,-2.805 -11.054,-2.805 l -34.842,0 c -5.455,0 -9.162,0.935 -11.12,2.805 -1.958,1.87 -2.936,5.444 -2.936,10.723 l 0,1.584 9.898,2.046 0,-8.513 43.224,0 0,16.101 -36.889,0 c -5.411,0 -9.085,0.936 -11.02,2.805 -1.936,1.87 -2.904,5.445 -2.904,10.723 l 0,5.412 c 0,5.235 0.968,8.81 2.904,10.723 1.935, [...]
+             id="path22"
+             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
+           transform="translate(152.0732,122.0288)"
+           id="g24"><path
+             d="m 0,0 -39.99,0 0,-40.122 39.99,0 0,13.66 10.822,-2.046 0,-6.731 c 0,-5.235 -0.979,-8.799 -2.936,-10.69 -1.958,-1.892 -5.664,-2.838 -11.12,-2.838 l -33.655,0 c -5.455,0 -9.139,0.913 -11.053,2.739 -1.913,1.826 -2.871,5.422 -2.871,10.789 l 0,30.158 c 0,5.323 0.968,8.919 2.904,10.789 1.935,1.87 5.609,2.805 11.02,2.805 l 33.655,0 c 5.456,0 9.162,-0.957 11.12,-2.871 1.957,-1.913 2.936,-5.488 2.936,-10.723 l 0,-5.412 L 0,-12.274 0,0 z"
+             id="path26"
+             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><path
+           d="m 199.795,73.262 -10.757,0 0,57.147 10.757,0 0,-57.147 z"
+           id="path28"
+           style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
+           d="m 347.788,73.262 -10.756,0 0,57.147 10.756,0 0,-57.147 z"
+           id="path30"
+           style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /><g
+           transform="translate(404.9463,122.0288)"
+           id="g32"><path
+             d="m 0,0 0,-48.767 -10.756,0 0,48.767 -27.057,0 0,8.513 65.001,0 L 27.188,0 0,0 z"
+             id="path34"
+             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><path
+           d="m 446.069,114.242 29.432,0 0,-9.898 -29.432,0 0,9.898 z"
+           id="path36"
+           style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /><g
+           transform="translate(547.5293,98.1401)"
+           id="g38"><path
+             d="m 0,0 -31.214,0 0,-14.583 31.214,0 c 2.64,0 4.432,0.374 5.378,1.121 0.945,0.748 1.419,2.068 1.419,3.96 l 0,4.355 c 0,1.892 -0.474,3.223 -1.419,3.992 C 4.432,-0.385 2.64,0 0,0 m 0,22.965 -31.214,0 0,-13.528 31.214,0 c 2.419,0 4.069,0.308 4.949,0.924 0.88,0.616 1.32,1.671 1.32,3.167 l 0,5.411 c 0,1.496 -0.429,2.541 -1.288,3.135 C 4.124,22.668 2.463,22.965 0,22.965 m -44.939,-47.843 0,57.148 51.339,0 c 4.796,0 8.249,-0.87 10.361,-2.607 2.112,-1.738 3.168,-4.564 3.168,-8.48 l [...]
+             id="path40"
+             style="fill:#43bc7d;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><path
+           d="m 595.471,130.41 13.857,0 0,-57.147 -13.857,0 0,57.147 z"
+           id="path42"
+           style="fill:#43bc7d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
+           d="m 651.386,84.216 46.062,0 0,35.701 -46.062,0 0,-35.701 z m -13.924,32.732 c 0,5.191 1.013,8.732 3.036,10.624 2.023,1.892 5.961,2.838 11.812,2.838 l 44.213,0 c 5.896,0 9.855,-0.946 11.857,-2.838 2.001,-1.892 2.991,-5.433 2.991,-10.624 l 0,-30.158 c 0,-5.235 -1.012,-8.799 -3.058,-10.69 -2.045,-1.892 -5.983,-2.838 -11.79,-2.838 l -44.213,0 c -5.851,0 -9.789,0.946 -11.812,2.838 -2.023,1.891 -3.036,5.455 -3.036,10.69 l 0,30.158 z"
+           id="path44"
+           style="fill:#43bc7d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><g
+           transform="translate(238.0605,187.9058)"
+           id="g46"><path
+             d="m 0,0 c -2.056,-42.858 6.694,-85.08 6.813,-127.865 0.013,-4.78 15.708,-1.82 15.691,4.204 C 22.387,-81.621 13.658,-40.09 15.678,2.008 15.983,8.375 0.246,5.13 0,0"
+             id="path48"
+             style="fill:#778e83;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
+           transform="translate(260.583,79.9512)"
+           id="g50"><path
+             d="m 0,0 c 10.269,7.129 19.204,15.785 26.621,25.847 7.882,10.693 14.135,22.561 20.742,34.052 2.021,3.513 -1.786,6.557 -4.888,6.988 -4.099,0.569 -8.146,-1.35 -10.19,-4.906 C 20.433,41.369 9.427,21.512 -10.537,7.65 -13.405,5.66 -14.383,1.403 -11.277,-0.846 -7.892,-3.299 -3.166,-2.199 0,0"
+             id="path52"
+             style="fill:#778e83;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
+           transform="translate(251.6475,76.5835)"
+           id="g54"><path
+             d="m 0,0 c 2.979,0.423 5.394,-1.374 7.73,-3.035 3.59,-2.552 7.229,-5.034 10.801,-7.611 7.694,-5.549 15.014,-11.606 22.206,-17.782 14.639,-12.57 28.594,-25.911 43.305,-38.399 4.059,-3.446 18.609,2.609 14.605,6.008 -16.079,13.65 -31.302,28.27 -47.404,41.895 C 43.175,-12.096 34.824,-5.585 26.046,0.31 22.599,2.625 19.44,5.307 15.642,7.053 11.513,8.951 6.654,8.827 2.271,8.205 -0.608,7.796 -5.774,6.377 -6.691,3.052 -7.62,-0.317 -1.795,-0.255 0,0"
+             id="path56"
+             style="fill:#778e83;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g></g></g></g></svg>
\ No newline at end of file
diff --git a/checklist.py b/checklist.py
index 00e8875..8c6442d 100755
--- a/checklist.py
+++ b/checklist.py
@@ -16,6 +16,7 @@ import os.path
 import subprocess
 import sys
 import ast
+import tokenize
 
 import dateutil.parser
 
@@ -36,8 +37,9 @@ def main():
 
     """
     root = 'skbio'
-    validators = [InitValidator(), ExecPermissionValidator(),
-                  GeneratedCythonValidator(), APIRegressionValidator()]
+    validators = [InitValidator(), CopyrightHeadersValidator(),
+                  ExecPermissionValidator(), GeneratedCythonValidator(),
+                  APIRegressionValidator(), FluxCapacitorValidator()]
 
     return_code = 0
     for validator in validators:
@@ -91,7 +93,7 @@ class RepoValidator(object):
         msg = []
         if invalids:
             success = False
-            msg.append(self.reason + ':')
+            msg.append(self.reason)
 
             for invalid in invalids:
                 msg.append("    %s" % invalid)
@@ -144,6 +146,71 @@ class RepoValidator(object):
         return stdout, stderr, return_value
 
 
+class CopyrightHeadersValidator(RepoValidator):
+    """Flag library files with non-standard copyright headers
+
+    See the current standard for scikit-bio's copyright headers at
+    ``http://scikit-bio.org/docs/latest/development/new_module.html``
+
+    Parameters
+    ----------
+    skip_dirs : iterable of str, optional
+        Directory names to skip during validation. Defaults to skipping any
+        directories named ``'data'`` or ``'__pycache__'`` (and anything
+        contained within them).
+
+    """
+
+    reason = ("Files non-conforming to standard headers as described in\n"
+              "http://scikit-bio.org/docs/latest/development/new_module.html:")
+
+    COPYRIGHT_HEADER = """\
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+"""
+
+    def __init__(self, skip_dirs=None):
+        if skip_dirs is None:
+            skip_dirs = {'data', '__pycache__'}
+        self.skip_dirs = set(skip_dirs)
+
+    def _validate(self, root, dirs, files):
+        for skip_dir in self.skip_dirs:
+            if skip_dir in dirs:
+                dirs.remove(skip_dir)
+
+        invalid_files = []
+        for _file in files:
+            if not _file.endswith('.py'):
+                continue
+            pos = 0
+            f = open(os.path.join(root, _file))
+            tokens = list(tokenize.generate_tokens(f.readline))
+
+            # A module docstring is fully described using just two tokens: the
+            # main string and a terminating newline. By convention, however, it
+            # is always followed by a newline, and thus we advance by three
+            # positions to get to the next logical line.
+            if tokens[pos][0] == tokenize.STRING:
+                pos += 3
+            # copyright header consists of 7 lines, and by discussion in
+            # preceding comment, spans through 14 tokens.
+            cheader = ''.join(map(lambda x: x[1], tokens[pos:pos + 14]))
+            # Ensure that there is no blank line at the end of the file
+            if (cheader != self.COPYRIGHT_HEADER or
+                    (tokens[pos + 14][0] != tokenize.NL and
+                     tokens[pos + 14][0] != tokenize.ENDMARKER)):
+                invalid_files.append(f.name)
+            f.close()
+
+        return invalid_files
+
+
 class InitValidator(RepoValidator):
     """Flag library code directories that are missing init files.
 
@@ -161,7 +228,7 @@ class InitValidator(RepoValidator):
         contained within them).
 
     """
-    reason = "Directories missing init files"
+    reason = "Directories missing init files:"
 
     def __init__(self, skip_dirs=None):
         if skip_dirs is None:
@@ -193,7 +260,7 @@ class ExecPermissionValidator(RepoValidator):
         C files (header and source files).
 
     """
-    reason = "Library code with execute permissions"
+    reason = "Library code with execute permissions:"
 
     def __init__(self, extensions=None):
         if extensions is None:
@@ -230,7 +297,7 @@ class GeneratedCythonValidator(RepoValidator):
         File extension for generated C files.
 
     """
-    reason = "Cython code with missing or outdated generated C code"
+    reason = "Cython code with missing or outdated generated C code:"
 
     def __init__(self, cython_ext='.pyx', c_ext='.c'):
         self.cython_ext = cython_ext
@@ -287,7 +354,7 @@ class APIRegressionValidator(RepoValidator):
 
     """
     reason = ("The following tests import `A` but should import `B`"
-              " (file: A => B)")
+              " (file: A => B):")
 
     def __init__(self):
         self._imports = {}
@@ -383,5 +450,32 @@ class APIRegressionValidator(RepoValidator):
         return skbio_imports
 
 
+class FluxCapacitorValidator(RepoValidator):
+    """Ensure that the __future__ statements are fluxing correctly"""
+    reason = ("These files do not have the following import at the start:\n\n"
+              "from __future__ import absolute_import, division,"
+              " print_function\n")
+
+    def _validate(self, root, dirs, files):
+        failures = []
+        expected = {"absolute_import", "division", "print_function"}
+        for file in files:
+            if file.endswith(".py"):
+                filename = os.path.join(root, file)
+                failures.append(filename)
+                with open(filename) as f:
+                    source = ast.parse(f.read())
+                    for node, _ in zip(ast.iter_child_nodes(source), range(2)):
+                        if isinstance(node, ast.Expr):
+                            continue
+                        if isinstance(node, ast.ImportFrom):
+                            if node.module == "__future__":
+                                if expected.issubset(
+                                        {n.name for n in node.names}):
+                                    failures.pop()
+                            break
+        return failures
+
+
 if __name__ == '__main__':
     sys.exit(main())
diff --git a/doc/README.md b/doc/README.md
index 613020f..c1c0ac7 100644
--- a/doc/README.md
+++ b/doc/README.md
@@ -13,12 +13,13 @@ Building the documentation
 To build the documentation, you'll need the following Python packages
 installed:
 
-- [Sphinx](http://sphinx-doc.org/) >= 1.2.2
+- [Sphinx](http://sphinx-doc.org/) == 1.2.2
 - [sphinx-bootstrap-theme](https://pypi.python.org/pypi/sphinx-bootstrap-theme/)
+- [numpydoc](https://github.com/numpy/numpydoc) >= v0.6
 
 An easy way to install the dependencies is via pip:
 
-    pip install Sphinx sphinx-bootstrap-theme
+    pip install Sphinx sphinx-bootstrap-theme git+git://github.com/numpy/numpydoc.git
 
 Finally, you will need to install scikit-bio.
 
@@ -32,10 +33,9 @@ a virtualenv) or point your ```PYTHONPATH``` environment variable to this code,
 To build the documentation, assuming you are at the top-level scikit-bio
 directory:
 
-    cd doc
-    make html
+    make -C doc clean html
 
-The built HTML documentation will be at ```build/html/index.html```.
+The built HTML documentation will be at ```doc/build/html/index.html```.
 
 Contributing to the documentation
 ---------------------------------
@@ -45,19 +45,15 @@ something entirely new or by modifying existing documentation, please first
 review our [scikit-bio contribution guide](../CONTRIBUTING.md).
 
 Before submitting your changes, ensure that the documentation builds without
-any errors or warnings, and that there are no broken links:
-
-    make clean
-    make html
-    make linkcheck
+errors or warnings.
 
 ### Documentation guidelines
 
 Most of scikit-bio's API documentation is automatically generated from
 [docstrings](http://legacy.python.org/dev/peps/pep-0257/#what-is-a-docstring).
 The advantage to this approach is that users can access the documentation in an
-interactive Python session or from our website as HTML. Other output forms are
-also possible, such as PDF.
+interactive Python session or from our website as HTML. Other output formats
+are also possible, such as PDF.
 
 scikit-bio docstrings follow the [numpydoc conventions](https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt).
 This ensures that the docstrings are easily readable both from the interpreter
@@ -83,10 +79,9 @@ you'd like to add to scikit-bio (let's call it ```skbio/example.py```).
 #### Module docstring
 
 The first thing you'll need to add is a docstring for the module. The docstring
-should be the first thing in the file following the ```#!``` line. It should
-start with a title for the module:
+must start at the first line of the file. It should start with a title for the
+module:
 
-    #!/usr/bin/env python
     """
     Documentation examples (:mod:`skbio.example`)
     =============================================
@@ -149,8 +144,8 @@ to each object is inserted into the page for you.
 After listing public module members, we encourage a usage example section
 showing how to use some of the module's functionality. Examples should be
 written in [doctest](http://docs.python.org/2/library/doctest.html) format so
-that they can be automatically tested (e.g., using ```nosetests
---with-doctest``` or ```make doctest```).
+that they can be automatically tested (e.g., using ```make test``` or
+```python -m skbio.test```).
 
     Examples
     --------
@@ -166,7 +161,7 @@ documentation with the ```.. plot::``` directive. For example:
 
     .. plot::
 
-       >>> from skbio.draw.distributions import boxplots
+       >>> from skbio.draw import boxplots
        >>> fig = boxplots([[2, 2, 1, 3, 4, 4.2, 7], [0, -1, 4, 5, 6, 7]])
 
 This will include the plot, a link to the source code used to generate the
@@ -182,25 +177,14 @@ functions, and exceptions), follow the numpydoc conventions. In addition to
 these conventions, there are a few things to keep in mind:
 
 - When documenting a class, only public methods and attributes are included in
-  the built documentation by default. If a method or attribute starts with an
-  underscore, it is assumed to be private. If you want a private method to be
-  included in the built documentation, add the following line to the method's
-  docstring:
-
-    ```
-    .. shownumpydoc
-    ```
-
-  For example, you might want to document "special" methods such as
-  ```__getitem__```, ```__str__```, etc., which would be ignored by default. We
-  recommend placing this at the end of the docstring for consistency. Note that
-  this will only work for methods; private attributes will *always* be ignored.
-
-- When documenting a class, include the ```Parameters``` and ```Attributes```
-  sections in the class docstring, instead of in the ```__init__``` docstring.
-  While numpydoc technically supports either form,
-  ```__init__``` is not included in the list of methods by default and thus
-  should have its documentation included in the class docstring.
+  the built documentation. If a method or attribute starts with an
+  underscore, it is assumed to be private.
+
+- When documenting a class, include the ```Parameters``` section in the class
+  docstring, instead of in the ```__init__``` docstring. While numpydoc
+  technically supports either form, ```__init__``` is not included in the list
+  of methods by default and thus should have its documentation included in the
+  class docstring.
 
 #### Including the module in the docs
 
diff --git a/doc/source/_static/style.css b/doc/source/_static/style.css
index 4ddba1e..3ae6991 100644
--- a/doc/source/_static/style.css
+++ b/doc/source/_static/style.css
@@ -22,6 +22,7 @@ cite, code {
     white-space: nowrap !important;
     border-radius: 4px !important;
     border: 1px solid #CCC !important;
+    font-style: normal !important;
     font-family: Menlo,Monaco,Consolas,"Courier New",monospace !important;
 }
 
diff --git a/doc/source/_templates/autosummary/class.rst b/doc/source/_templates/autosummary/class.rst
index daa8ff7..7cc1e1c 100644
--- a/doc/source/_templates/autosummary/class.rst
+++ b/doc/source/_templates/autosummary/class.rst
@@ -1,6 +1,6 @@
 {% extends "!autosummary/class.rst" %}
 
-{# Taken and modified from scipy's sphinx documentation setup (https://github.com/scipy/scipy/blob/master/doc/source/_templates/autosummary/class.rst). #}
+{# Taken from scipy's sphinx documentation setup (https://github.com/scipy/scipy/blob/master/doc/source/_templates/autosummary/class.rst). #}
 
 {% block methods %}
 {% if methods %}
@@ -8,7 +8,9 @@
       .. autosummary::
          :toctree:
       {% for item in all_methods %}
+         {%- if not item.startswith('_') or item in ['__call__'] %}
          {{ name }}.{{ item }}
+         {%- endif -%}
       {%- endfor %}
 {% endif %}
 {% endblock %}
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 982ff5c..3905715 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -1,23 +1,46 @@
+# NOTE: parts of this file were taken from scipy's doc/source/conf.py. See
+# scikit-bio/licenses/scipy.txt for scipy's license.
+
 import glob
 import sys
 import os
 
+# Check that dependencies are installed and the correct version if necessary
+sphinx_version = '1.2.2'
+import sphinx
+if sphinx.__version__ != sphinx_version:
+    raise RuntimeError("Sphinx %s required" % sphinx_version)
+
 import sphinx_bootstrap_theme
 
-import skbio
+# We currently rely on the latest version of numpydoc available on GitHub:
+#   git+git://github.com/numpy/numpydoc.git
+#
+# There isn't a way to specify this in setup.py as a dependency since this
+# feature is being removed from pip. We also can't check the version of
+# numpydoc installed because there isn't a numpydoc.__version__ defined.
+try:
+    import numpydoc
+except ImportError:
+    raise RuntimeError(
+        "numpydoc v0.6 or later required. Install it with:\n"
+        "  pip install git+git://github.com/numpy/numpydoc.git")
 
-# NOTE: parts of this file were taken from scipy's doc/source/conf.py. See
-# scikit-bio/licenses/scipy.txt for scipy's license.
+import skbio
+from skbio.util._decorator import classproperty
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.insert(0, os.path.abspath('../sphinxext/numpydoc'))
+# documentation root, use os.path.abspath to make it absolute, like shown here:
+#
+#    sys.path.insert(0, os.path.abspath('../sphinxext/foo'))
 
 # -- General configuration ------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = '1.1'
+# Using `sphinx_version` doesn't work, likely because Sphinx is expecting a
+# version string of the form X.Y, not X.Y.Z.
+needs_sphinx = '1.2'
 
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
@@ -279,8 +302,8 @@ man_pages = [
 texinfo_documents = [
   ('index', 'scikit-bio', u'scikit-bio Documentation',
    u'scikit-bio development team', 'scikit-bio',
-   'Core objects, functions and statistics for working with biological data '
-   'in Python.', 'Miscellaneous'),
+   'Data structures, algorithms, and educational resources for working with '
+   'biological data in Python.', 'Miscellaneous'),
 ]
 
 # Documents to append as an appendix to all manuals.
@@ -301,6 +324,10 @@ autosummary_generate = glob.glob('*.rst')
 # -- Options for numpydoc -------------------------------------------------
 # Generate plots for example sections
 numpydoc_use_plots = True
+# If we don't turn numpydoc's toctree generation off, Sphinx will warn about
+# the toctree referencing missing document(s). This appears to be related to
+# generating docs for classes with a __call__ method.
+numpydoc_class_members_toctree = False
 
 #------------------------------------------------------------------------------
 # Plot
@@ -434,9 +461,24 @@ def linkcode_resolve(domain, info):
 # Link-checking on Travis sometimes times out.
 linkcheck_timeout = 30
 
+# This is so that our docs build.
+def _closure():
+    def __get__(self, cls, owner):
+        return self
+
+    classproperty.__get__ = __get__
+
+_closure()
+
+def autodoc_skip_member(app, what, name, obj, skip, options):
+    if what == "method":
+        if isinstance(obj, classproperty):
+            return True
+    return skip
 
 # Add the 'copybutton' javascript, to hide/show the prompt in code
 # examples, originally taken from scikit-learn's doc/conf.py
 def setup(app):
     app.add_javascript('copybutton.js')
     app.add_stylesheet('style.css')
+    app.connect('autodoc-skip-member', autodoc_skip_member)
diff --git a/doc/source/development/coding_guidelines.rst b/doc/source/development/coding_guidelines.rst
index 48cee36..2bafd17 100644
--- a/doc/source/development/coding_guidelines.rst
+++ b/doc/source/development/coding_guidelines.rst
@@ -7,7 +7,7 @@ As project size increases, consistency of the code base and documentation become
 * you can trust that the code that you're working with is sufficiently tested, and
 * names and interfaces are intuitive.
 
-**As scikit-bio is in alpha, our coding guidelines are presented here as a working draft. These guidelines are requirements for all code submitted to scikit-bio, but at this stage the guidelines themselves are malleable. If you disagree with something, or have a suggestion for something new to include, you should** `create an issue`_ **to initiate a discussion.**
+**As scikit-bio is in beta, our coding guidelines are presented here as a working draft. These guidelines are requirements for all code submitted to scikit-bio, but at this stage the guidelines themselves are malleable. If you disagree with something, or have a suggestion for something new to include, you should** `create an issue`_ **to initiate a discussion.**
 
 .. _`create an issue`: https://github.com/biocore/scikit-bio/issues
 
@@ -269,11 +269,11 @@ Some pointers
 
 - *Test `all` the methods in your class.* You should assume that any method you haven't tested has bugs. The convention for naming tests is ``test_method_name``. Any leading and trailing underscores on the method name can be ignored for the purposes of the test; however, *all tests must start with the literal substring* ``test`` *for* ``unittest`` and ``nose`` *to find them.* If the method is particularly complex, or has several discretely different cases you need to check, use ``test_me [...]
 
-- *Docstrings for testing methods should be considered optional*, instead the description of what the method does should be included in the name itself, therefore the name should be descriptive enough such that when running ``nose -v`` you can immediately see the file and test method that's failing.
+- *Docstrings for testing methods should be considered optional*, instead the description of what the method does should be included in the name itself, therefore the name should be descriptive enough such that when running the tests in verbose mode you can immediately see the file and test method that's failing.
 
 .. code-block:: none
 
-    $ nosetests -v
+    $ python -c "import skbio; skbio.test(verbose=True)"
     skbio.maths.diversity.alpha.tests.test_ace.test_ace ... ok
     test_berger_parker_d (skbio.maths.diversity.alpha.tests.test_base.BaseTests) ... ok
 
@@ -301,9 +301,6 @@ Example of a ``nose`` test module structure
 
 .. code-block:: python
 
-    #!/usr/bin/env python
-    from __future__ import division
-
     # ----------------------------------------------------------------------------
     # Copyright (c) 2013--, scikit-bio development team.
     #
@@ -312,6 +309,8 @@ Example of a ``nose`` test module structure
     # The full license is in the file COPYING.txt, distributed with this software.
     # ----------------------------------------------------------------------------
 
+    from __future__ import absolute_import, division, print_function
+
     import numpy as np
     from nose.tools import assert_almost_equal, assert_raises
 
diff --git a/doc/source/development/new_module.rst b/doc/source/development/new_module.rst
index 063a5f7..297b987 100644
--- a/doc/source/development/new_module.rst
+++ b/doc/source/development/new_module.rst
@@ -24,8 +24,8 @@ like this::
 
   from __future__ import absolute_import, division, print_function
 
-  from numpy.testing import Tester
-  test = Tester().test
+  from skbio.util import TestRunner
+  test = TestRunner(__file__).test
 
 Usually, some functionality from the module will be made accessible by
 importing it in `__init__.py`. It's convenient to use explicit
@@ -45,5 +45,7 @@ necessary so that all tests can be run after installation)::
   # The full license is in the file COPYING.txt, distributed with this software.
   # ----------------------------------------------------------------------------
 
+  from __future__ import absolute_import, division, print_function
+
 Finally, remember to also follow the `documentation guidelines
 <https://github.com/biocore/scikit-bio/blob/master/doc/README.md#documenting-a-module-in-scikit-bio>`_.
diff --git a/doc/source/development/py3.rst b/doc/source/development/py3.rst
index af66c34..98fcb18 100644
--- a/doc/source/development/py3.rst
+++ b/doc/source/development/py3.rst
@@ -180,7 +180,7 @@ StringIO and BytesIO
 
 In Py2 there are three flavours of StringIO: a pure Python module
 (StringIO), an accelerated version (cStringIO), and another one in the
-io module. They all behave in a slightly different way, with differnt
+io module. They all behave in a slightly different way, with different
 memory and performance characteristics. So far, we're using::
 
     from six import StringIO
@@ -294,7 +294,7 @@ When testing if a variable is a string use
 Numbers
 -------
 
-The `long` type no longer exists in Py2. To test if a number is an
+The `long` type no longer exists in Py3. To test if a number is an
 integer (`int` or `long` in Py2, `int` in Py3), compare it to
 the abstract base class `Integral`::
 
diff --git a/doc/source/format.sequences.rst b/doc/source/format.sequences.rst
deleted file mode 100644
index 522e0c0..0000000
--- a/doc/source/format.sequences.rst
+++ /dev/null
@@ -1 +0,0 @@
-.. automodule:: skbio.format.sequences
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 17a1c63..ff64847 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -11,18 +11,26 @@ API Reference
 .. toctree::
    :maxdepth: 1
 
-   alignment
+   io
    sequence
+   alignment
    tree
    workflow
    draw
-   format.sequences
    diversity
    stats
-   parse.sequences
-   io
    util
 
+User Documentation
+------------------
+
+The user documentation contains high-level information for users of scikit-bio.
+
+.. toctree::
+   :maxdepth: 1
+
+   user/api_stability
+
 Developer Documentation
 -----------------------
 
diff --git a/doc/source/parse.sequences.rst b/doc/source/parse.sequences.rst
deleted file mode 100644
index c704610..0000000
--- a/doc/source/parse.sequences.rst
+++ /dev/null
@@ -1 +0,0 @@
-.. automodule:: skbio.parse.sequences
\ No newline at end of file
diff --git a/doc/source/user/api_stability.rst b/doc/source/user/api_stability.rst
new file mode 100644
index 0000000..d200e33
--- /dev/null
+++ b/doc/source/user/api_stability.rst
@@ -0,0 +1,69 @@
+API Stability
+=============
+
+All public functionality in scikit-bio has a defined stability state.
+These states inform users and developers to what extent they can rely on
+different APIs in the package.
+
+You can find out the stability state of public functionality by looking at its
+docstring, which is formatted based on
+`numpydoc <https://github.com/numpy/numpydoc>`_. This information will either
+be in the *Extended Summary* section of the docstring, or in the case of
+deprecation, this information will appear as a note following the *Short
+Summary*.
+
+The following diagram illustrates the API lifecycle in scikit-bio:
+
+.. image:: assets/api-lifecycle.png
+   :align: center
+
+Definitions of the stability states and the information associated with each
+follow.
+
+Stable
+------
+Functionality defined as stable is part of scikit-bio's
+backward-compatible API. Users can be confident that the API will not change without
+first passing through the deprecated state, typically for at least two
+release cycles. We make every effort to maintain the API of this code.
+
+The docstrings of stable functionality will indicate the first scikit-bio
+version where the functionality was considered stable.
+
+Experimental
+------------
+Functionality defined as experimental is being considered for addition to
+scikit-bio's stable API. Users are encouraged to use this code, but to be
+aware that its API may change or be removed. Experimental functionality
+will typically pass through the deprecated state before it is removed, but
+in rare cases it may be removed directly (for example, if a serious
+methodological flaw is discovered that makes the functionality
+scientifically invalid).
+
+The docstrings of experimental functionality will indicate the first
+scikit-bio version where the functionality was considered experimental.
+
+We aim to move functionality through the experimental phase quickly (for
+example, two releases before moving to stable), but we don't make specific
+promises about when experimental functionality will become stable. This
+aligns with our philosophy that we don't make promises about experimental
+APIs, only about stable APIs.
+
+Deprecated
+----------
+Functionality defined as deprecated is targeted for removal from
+scikit-bio. Users should transition away from using it.
+
+The docstrings of deprecated functionality will indicate the first version
+of scikit-bio where the functionality was deprecated, the version of
+scikit-bio when the functionality will be removed, and the reason for
+deprecation of the code (for example, because a function was determined to
+be scientifically invalid, or because the API was adapted, and users should
+be using a different version of the function).
+
+Using deprecated functionality will raise a ``DeprecationWarning``. Since
+Python 2.7, these types of warnings are **silenced by default**. When
+developing a tool that uses scikit-bio, we recommend enabling the display of
+deprecation warnings to be informed of upcoming API changes. For details on how
+to display deprecation warnings, see `Python's deprecation warning docs
+<https://docs.python.org/3/whatsnew/2.7.html#changes-to-the-handling-of-deprecation-warnings>`_.
diff --git a/doc/source/user/assets/api-lifecycle.png b/doc/source/user/assets/api-lifecycle.png
new file mode 100644
index 0000000..d879a18
Binary files /dev/null and b/doc/source/user/assets/api-lifecycle.png differ
diff --git a/doc/sphinxext/numpydoc/LICENSE.txt b/doc/sphinxext/numpydoc/LICENSE.txt
deleted file mode 100644
index b15c699..0000000
--- a/doc/sphinxext/numpydoc/LICENSE.txt
+++ /dev/null
@@ -1,94 +0,0 @@
--------------------------------------------------------------------------------
-    The files
-    - numpydoc.py
-    - docscrape.py
-    - docscrape_sphinx.py
-    - phantom_import.py
-    have the following license:
-
-Copyright (C) 2008 Stefan van der Walt <stefan at mentat.za.net>, Pauli Virtanen <pav at iki.fi>
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in
-    the documentation and/or other materials provided with the
-    distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
-INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
-IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
--------------------------------------------------------------------------------
-    The files
-    - compiler_unparse.py
-    - comment_eater.py
-    - traitsdoc.py
-    have the following license:
-
-This software is OSI Certified Open Source Software.
-OSI Certified is a certification mark of the Open Source Initiative.
-
-Copyright (c) 2006, Enthought, Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
- * Neither the name of Enthought, Inc. nor the names of its contributors may
-   be used to endorse or promote products derived from this software without
-   specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
--------------------------------------------------------------------------------
-    The file
-    - plot_directive.py
-    originates from Matplotlib (http://matplotlib.sf.net/) which has
-    the following license:
-
-Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved.
-
-1. This LICENSE AGREEMENT is between John D. Hunter (“JDH”), and the Individual or Organization (“Licensee”) accessing and otherwise using matplotlib software in source or binary form and its associated documentation.
-
-2. Subject to the terms and conditions of this License Agreement, JDH hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use matplotlib 0.98.3 alone or in any derivative version, provided, however, that JDH’s License Agreement and JDH’s notice of copyright, i.e., “Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved” are retained in matplotlib 0.98 [...]
-
-3. In the event Licensee prepares a derivative work that is based on or incorporates matplotlib 0.98.3 or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to matplotlib 0.98.3.
-
-4. JDH is making matplotlib 0.98.3 available to Licensee on an “AS IS” basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB 0.98.3 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
-
-5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB 0.98.3 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING MATPLOTLIB 0.98.3, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
-
-6. This License Agreement will automatically terminate upon a material breach of its terms and conditions.
-
-7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between JDH and Licensee. This License Agreement does not grant permission to use JDH trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party.
-
-8. By copying, installing or otherwise using matplotlib 0.98.3, Licensee agrees to be bound by the terms and conditions of this License Agreement.
-
diff --git a/doc/sphinxext/numpydoc/README.rst b/doc/sphinxext/numpydoc/README.rst
deleted file mode 100644
index 0c40af1..0000000
--- a/doc/sphinxext/numpydoc/README.rst
+++ /dev/null
@@ -1,54 +0,0 @@
-.. image:: https://travis-ci.org/numpy/numpydoc.png?branch=master
-   :target: https://travis-ci.org/numpy/numpydoc/
-
-=====================================
-numpydoc -- Numpy's Sphinx extensions
-=====================================
-
-Numpy's documentation uses several custom extensions to Sphinx.  These
-are shipped in this ``numpydoc`` package, in case you want to make use
-of them in third-party projects.
-
-The following extensions are available:
-
-  - ``numpydoc``: support for the Numpy docstring format in Sphinx, and add
-    the code description directives ``np:function``, ``np-c:function``, etc.
-    that support the Numpy docstring syntax.
-
-  - ``numpydoc.traitsdoc``: For gathering documentation about Traits attributes.
-
-  - ``numpydoc.plot_directive``: Adaptation of Matplotlib's ``plot::``
-    directive. Note that this implementation may still undergo severe
-    changes or eventually be deprecated.
-
-
-numpydoc
-========
-
-Numpydoc inserts a hook into Sphinx's autodoc that converts docstrings
-following the Numpy/Scipy format to a form palatable to Sphinx.
-
-Options
--------
-
-The following options can be set in conf.py:
-
-- numpydoc_use_plots: bool
-
-  Whether to produce ``plot::`` directives for Examples sections that
-  contain ``import matplotlib``.
-
-- numpydoc_show_class_members: bool
-
-  Whether to show all members of a class in the Methods and Attributes
-  sections automatically.
-
-- numpydoc_class_members_toctree: bool
-
-  Whether to create a Sphinx table of contents for the lists of class
-  methods and attributes. If a table of contents is made, Sphinx expects
-  each entry to have a separate page.
-
-- numpydoc_edit_link: bool  (DEPRECATED -- edit your HTML template instead)
-
-  Whether to insert an edit link after docstrings.
diff --git a/doc/sphinxext/numpydoc/numpydoc/__init__.py b/doc/sphinxext/numpydoc/numpydoc/__init__.py
deleted file mode 100644
index 0fce2cf..0000000
--- a/doc/sphinxext/numpydoc/numpydoc/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-from .numpydoc import setup
diff --git a/doc/sphinxext/numpydoc/numpydoc/comment_eater.py b/doc/sphinxext/numpydoc/numpydoc/comment_eater.py
deleted file mode 100644
index 8cddd33..0000000
--- a/doc/sphinxext/numpydoc/numpydoc/comment_eater.py
+++ /dev/null
@@ -1,169 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-import sys
-if sys.version_info[0] >= 3:
-    from io import StringIO
-else:
-    from io import StringIO
-
-import compiler
-import inspect
-import textwrap
-import tokenize
-
-from .compiler_unparse import unparse
-
-
-class Comment(object):
-    """ A comment block.
-    """
-    is_comment = True
-    def __init__(self, start_lineno, end_lineno, text):
-        # int : The first line number in the block. 1-indexed.
-        self.start_lineno = start_lineno
-        # int : The last line number. Inclusive!
-        self.end_lineno = end_lineno
-        # str : The text block including '#' character but not any leading spaces.
-        self.text = text
-
-    def add(self, string, start, end, line):
-        """ Add a new comment line.
-        """
-        self.start_lineno = min(self.start_lineno, start[0])
-        self.end_lineno = max(self.end_lineno, end[0])
-        self.text += string
-
-    def __repr__(self):
-        return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
-            self.end_lineno, self.text)
-
-
-class NonComment(object):
-    """ A non-comment block of code.
-    """
-    is_comment = False
-    def __init__(self, start_lineno, end_lineno):
-        self.start_lineno = start_lineno
-        self.end_lineno = end_lineno
-
-    def add(self, string, start, end, line):
-        """ Add lines to the block.
-        """
-        if string.strip():
-            # Only add if not entirely whitespace.
-            self.start_lineno = min(self.start_lineno, start[0])
-            self.end_lineno = max(self.end_lineno, end[0])
-
-    def __repr__(self):
-        return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
-            self.end_lineno)
-
-
-class CommentBlocker(object):
-    """ Pull out contiguous comment blocks.
-    """
-    def __init__(self):
-        # Start with a dummy.
-        self.current_block = NonComment(0, 0)
-
-        # All of the blocks seen so far.
-        self.blocks = []
-
-        # The index mapping lines of code to their associated comment blocks.
-        self.index = {}
-
-    def process_file(self, file):
-        """ Process a file object.
-        """
-        if sys.version_info[0] >= 3:
-            nxt = file.__next__
-        else:
-            nxt = file.next
-        for token in tokenize.generate_tokens(nxt):
-            self.process_token(*token)
-        self.make_index()
-
-    def process_token(self, kind, string, start, end, line):
-        """ Process a single token.
-        """
-        if self.current_block.is_comment:
-            if kind == tokenize.COMMENT:
-                self.current_block.add(string, start, end, line)
-            else:
-                self.new_noncomment(start[0], end[0])
-        else:
-            if kind == tokenize.COMMENT:
-                self.new_comment(string, start, end, line)
-            else:
-                self.current_block.add(string, start, end, line)
-
-    def new_noncomment(self, start_lineno, end_lineno):
-        """ We are transitioning from a noncomment to a comment.
-        """
-        block = NonComment(start_lineno, end_lineno)
-        self.blocks.append(block)
-        self.current_block = block
-
-    def new_comment(self, string, start, end, line):
-        """ Possibly add a new comment.
-
-        Only adds a new comment if this comment is the only thing on the line.
-        Otherwise, it extends the noncomment block.
-        """
-        prefix = line[:start[1]]
-        if prefix.strip():
-            # Oops! Trailing comment, not a comment block.
-            self.current_block.add(string, start, end, line)
-        else:
-            # A comment block.
-            block = Comment(start[0], end[0], string)
-            self.blocks.append(block)
-            self.current_block = block
-
-    def make_index(self):
-        """ Make the index mapping lines of actual code to their associated
-        prefix comments.
-        """
-        for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
-            if not block.is_comment:
-                self.index[block.start_lineno] = prev
-
-    def search_for_comment(self, lineno, default=None):
-        """ Find the comment block just before the given line number.
-
-        Returns None (or the specified default) if there is no such block.
-        """
-        if not self.index:
-            self.make_index()
-        block = self.index.get(lineno, None)
-        text = getattr(block, 'text', default)
-        return text
-
-
-def strip_comment_marker(text):
-    """ Strip # markers at the front of a block of comment text.
-    """
-    lines = []
-    for line in text.splitlines():
-        lines.append(line.lstrip('#'))
-    text = textwrap.dedent('\n'.join(lines))
-    return text
-
-
-def get_class_traits(klass):
-    """ Yield all of the documentation for trait definitions on a class object.
-    """
-    # FIXME: gracefully handle errors here or in the caller?
-    source = inspect.getsource(klass)
-    cb = CommentBlocker()
-    cb.process_file(StringIO(source))
-    mod_ast = compiler.parse(source)
-    class_ast = mod_ast.node.nodes[0]
-    for node in class_ast.code.nodes:
-        # FIXME: handle other kinds of assignments?
-        if isinstance(node, compiler.ast.Assign):
-            name = node.nodes[0].name
-            rhs = unparse(node.expr).strip()
-            doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
-            yield name, rhs, doc
-
diff --git a/doc/sphinxext/numpydoc/numpydoc/compiler_unparse.py b/doc/sphinxext/numpydoc/numpydoc/compiler_unparse.py
deleted file mode 100644
index 8933a83..0000000
--- a/doc/sphinxext/numpydoc/numpydoc/compiler_unparse.py
+++ /dev/null
@@ -1,865 +0,0 @@
-""" Turn compiler.ast structures back into executable python code.
-
-    The unparse method takes a compiler.ast tree and transforms it back into
-    valid python code.  It is incomplete and currently only works for
-    import statements, function calls, function definitions, assignments, and
-    basic expressions.
-
-    Inspired by python-2.5-svn/Demo/parser/unparse.py
-
-    fixme: We may want to move to using _ast trees because the compiler for
-           them is about 6 times faster than compiler.compile.
-"""
-from __future__ import division, absolute_import, print_function
-
-import sys
-from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
-
-if sys.version_info[0] >= 3:
-    from io import StringIO
-else:
-    from StringIO import StringIO
-
-def unparse(ast, single_line_functions=False):
-    s = StringIO()
-    UnparseCompilerAst(ast, s, single_line_functions)
-    return s.getvalue().lstrip()
-
-op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
-                  'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
-
-class UnparseCompilerAst:
-    """ Methods in this class recursively traverse an AST and
-        output source code for the abstract syntax; original formatting
-        is disregarged.
-    """
-
-    #########################################################################
-    # object interface.
-    #########################################################################
-
-    def __init__(self, tree, file = sys.stdout, single_line_functions=False):
-        """ Unparser(tree, file=sys.stdout) -> None.
-
-            Print the source for tree to file.
-        """
-        self.f = file
-        self._single_func = single_line_functions
-        self._do_indent = True
-        self._indent = 0
-        self._dispatch(tree)
-        self._write("\n")
-        self.f.flush()
-
-    #########################################################################
-    # Unparser private interface.
-    #########################################################################
-
-    ### format, output, and dispatch methods ################################
-
-    def _fill(self, text = ""):
-        "Indent a piece of text, according to the current indentation level"
-        if self._do_indent:
-            self._write("\n"+"    "*self._indent + text)
-        else:
-            self._write(text)
-
-    def _write(self, text):
-        "Append a piece of text to the current line."
-        self.f.write(text)
-
-    def _enter(self):
-        "Print ':', and increase the indentation."
-        self._write(": ")
-        self._indent += 1
-
-    def _leave(self):
-        "Decrease the indentation level."
-        self._indent -= 1
-
-    def _dispatch(self, tree):
-        "_dispatcher function, _dispatching tree type T to method _T."
-        if isinstance(tree, list):
-            for t in tree:
-                self._dispatch(t)
-            return
-        meth = getattr(self, "_"+tree.__class__.__name__)
-        if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
-            return
-        meth(tree)
-
-
-    #########################################################################
-    # compiler.ast unparsing methods.
-    #
-    # There should be one method per concrete grammar type. They are
-    # organized in alphabetical order.
-    #########################################################################
-
-    def _Add(self, t):
-        self.__binary_op(t, '+')
-
-    def _And(self, t):
-        self._write(" (")
-        for i, node in enumerate(t.nodes):
-            self._dispatch(node)
-            if i != len(t.nodes)-1:
-                self._write(") and (")
-        self._write(")")
-
-    def _AssAttr(self, t):
-        """ Handle assigning an attribute of an object
-        """
-        self._dispatch(t.expr)
-        self._write('.'+t.attrname)
-
-    def _Assign(self, t):
-        """ Expression Assignment such as "a = 1".
-
-            This only handles assignment in expressions.  Keyword assignment
-            is handled separately.
-        """
-        self._fill()
-        for target in t.nodes:
-            self._dispatch(target)
-            self._write(" = ")
-        self._dispatch(t.expr)
-        if not self._do_indent:
-            self._write('; ')
-
-    def _AssName(self, t):
-        """ Name on left hand side of expression.
-
-            Treat just like a name on the right side of an expression.
-        """
-        self._Name(t)
-
-    def _AssTuple(self, t):
-        """ Tuple on left hand side of an expression.
-        """
-
-        # _write each elements, separated by a comma.
-        for element in t.nodes[:-1]:
-            self._dispatch(element)
-            self._write(", ")
-
-        # Handle the last one without writing comma
-        last_element = t.nodes[-1]
-        self._dispatch(last_element)
-
-    def _AugAssign(self, t):
-        """ +=,-=,*=,/=,**=, etc. operations
-        """
-
-        self._fill()
-        self._dispatch(t.node)
-        self._write(' '+t.op+' ')
-        self._dispatch(t.expr)
-        if not self._do_indent:
-            self._write(';')
-
-    def _Bitand(self, t):
-        """ Bit and operation.
-        """
-
-        for i, node in enumerate(t.nodes):
-            self._write("(")
-            self._dispatch(node)
-            self._write(")")
-            if i != len(t.nodes)-1:
-                self._write(" & ")
-
-    def _Bitor(self, t):
-        """ Bit or operation
-        """
-
-        for i, node in enumerate(t.nodes):
-            self._write("(")
-            self._dispatch(node)
-            self._write(")")
-            if i != len(t.nodes)-1:
-                self._write(" | ")
-
-    def _CallFunc(self, t):
-        """ Function call.
-        """
-        self._dispatch(t.node)
-        self._write("(")
-        comma = False
-        for e in t.args:
-            if comma: self._write(", ")
-            else: comma = True
-            self._dispatch(e)
-        if t.star_args:
-            if comma: self._write(", ")
-            else: comma = True
-            self._write("*")
-            self._dispatch(t.star_args)
-        if t.dstar_args:
-            if comma: self._write(", ")
-            else: comma = True
-            self._write("**")
-            self._dispatch(t.dstar_args)
-        self._write(")")
-
-    def _Compare(self, t):
-        self._dispatch(t.expr)
-        for op, expr in t.ops:
-            self._write(" " + op + " ")
-            self._dispatch(expr)
-
-    def _Const(self, t):
-        """ A constant value such as an integer value, 3, or a string, "hello".
-        """
-        self._dispatch(t.value)
-
-    def _Decorators(self, t):
-        """ Handle function decorators (eg. @has_units)
-        """
-        for node in t.nodes:
-            self._dispatch(node)
-
-    def _Dict(self, t):
-        self._write("{")
-        for  i, (k, v) in enumerate(t.items):
-            self._dispatch(k)
-            self._write(": ")
-            self._dispatch(v)
-            if i < len(t.items)-1:
-                self._write(", ")
-        self._write("}")
-
-    def _Discard(self, t):
-        """ Node for when return value is ignored such as in "foo(a)".
-        """
-        self._fill()
-        self._dispatch(t.expr)
-
-    def _Div(self, t):
-        self.__binary_op(t, '/')
-
-    def _Ellipsis(self, t):
-        self._write("...")
-
-    def _From(self, t):
-        """ Handle "from xyz import foo, bar as baz".
-        """
-        # fixme: Are From and ImportFrom handled differently?
-        self._fill("from ")
-        self._write(t.modname)
-        self._write(" import ")
-        for i, (name,asname) in enumerate(t.names):
-            if i != 0:
-                self._write(", ")
-            self._write(name)
-            if asname is not None:
-                self._write(" as "+asname)
-
-    def _Function(self, t):
-        """ Handle function definitions
-        """
-        if t.decorators is not None:
-            self._fill("@")
-            self._dispatch(t.decorators)
-        self._fill("def "+t.name + "(")
-        defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
-        for i, arg in enumerate(zip(t.argnames, defaults)):
-            self._write(arg[0])
-            if arg[1] is not None:
-                self._write('=')
-                self._dispatch(arg[1])
-            if i < len(t.argnames)-1:
-                self._write(', ')
-        self._write(")")
-        if self._single_func:
-            self._do_indent = False
-        self._enter()
-        self._dispatch(t.code)
-        self._leave()
-        self._do_indent = True
-
-    def _Getattr(self, t):
-        """ Handle getting an attribute of an object
-        """
-        if isinstance(t.expr, (Div, Mul, Sub, Add)):
-            self._write('(')
-            self._dispatch(t.expr)
-            self._write(')')
-        else:
-            self._dispatch(t.expr)
-            
-        self._write('.'+t.attrname)
-        
-    def _If(self, t):
-        self._fill()
-        
-        for i, (compare,code) in enumerate(t.tests):
-            if i == 0:
-                self._write("if ")
-            else:
-                self._write("elif ")
-            self._dispatch(compare)
-            self._enter()
-            self._fill()
-            self._dispatch(code)
-            self._leave()
-            self._write("\n")
-
-        if t.else_ is not None:
-            self._write("else")
-            self._enter()
-            self._fill()
-            self._dispatch(t.else_)
-            self._leave()
-            self._write("\n")
-            
-    def _IfExp(self, t):
-        self._dispatch(t.then)
-        self._write(" if ")
-        self._dispatch(t.test)
-
-        if t.else_ is not None:
-            self._write(" else (")
-            self._dispatch(t.else_)
-            self._write(")")
-
-    def _Import(self, t):
-        """ Handle "import xyz.foo".
-        """
-        self._fill("import ")
-        
-        for i, (name,asname) in enumerate(t.names):
-            if i != 0:
-                self._write(", ")
-            self._write(name)
-            if asname is not None:
-                self._write(" as "+asname)
-
-    def _Keyword(self, t):
-        """ Keyword value assignment within function calls and definitions.
-        """
-        self._write(t.name)
-        self._write("=")
-        self._dispatch(t.expr)
-        
-    def _List(self, t):
-        self._write("[")
-        for  i,node in enumerate(t.nodes):
-            self._dispatch(node)
-            if i < len(t.nodes)-1:
-                self._write(", ")
-        self._write("]")
-
-    def _Module(self, t):
-        if t.doc is not None:
-            self._dispatch(t.doc)
-        self._dispatch(t.node)
-
-    def _Mul(self, t):
-        self.__binary_op(t, '*')
-
-    def _Name(self, t):
-        self._write(t.name)
-
-    def _NoneType(self, t):
-        self._write("None")
-        
-    def _Not(self, t):
-        self._write('not (')
-        self._dispatch(t.expr)
-        self._write(')')
-        
-    def _Or(self, t):
-        self._write(" (")
-        for i, node in enumerate(t.nodes):
-            self._dispatch(node)
-            if i != len(t.nodes)-1:
-                self._write(") or (")
-        self._write(")")
-                
-    def _Pass(self, t):
-        self._write("pass\n")
-
-    def _Printnl(self, t):
-        self._fill("print ")
-        if t.dest:
-            self._write(">> ")
-            self._dispatch(t.dest)
-            self._write(", ")
-        comma = False
-        for node in t.nodes:
-            if comma: self._write(', ')
-            else: comma = True
-            self._dispatch(node)
-
-    def _Power(self, t):
-        self.__binary_op(t, '**')
-
-    def _Return(self, t):
-        self._fill("return ")
-        if t.value:
-            if isinstance(t.value, Tuple):
-                text = ', '.join([ name.name for name in t.value.asList() ])
-                self._write(text)
-            else:
-                self._dispatch(t.value)
-            if not self._do_indent:
-                self._write('; ')
-
-    def _Slice(self, t):
-        self._dispatch(t.expr)
-        self._write("[")
-        if t.lower:
-            self._dispatch(t.lower)
-        self._write(":")
-        if t.upper:
-            self._dispatch(t.upper)
-        #if t.step:
-        #    self._write(":")
-        #    self._dispatch(t.step)
-        self._write("]")
-
-    def _Sliceobj(self, t):
-        for i, node in enumerate(t.nodes):
-            if i != 0:
-                self._write(":")
-            if not (isinstance(node, Const) and node.value is None):
-                self._dispatch(node)
-
-    def _Stmt(self, tree):
-        for node in tree.nodes:
-            self._dispatch(node)
-
-    def _Sub(self, t):
-        self.__binary_op(t, '-')
-
-    def _Subscript(self, t):
-        self._dispatch(t.expr)
-        self._write("[")
-        for i, value in enumerate(t.subs):
-            if i != 0:
-                self._write(",")
-            self._dispatch(value)
-        self._write("]")
-
-    def _TryExcept(self, t):
-        self._fill("try")
-        self._enter()
-        self._dispatch(t.body)
-        self._leave()
-
-        for handler in t.handlers:
-            self._fill('except ')
-            self._dispatch(handler[0])
-            if handler[1] is not None:
-                self._write(', ')
-                self._dispatch(handler[1])
-            self._enter()
-            self._dispatch(handler[2])
-            self._leave()
-            
-        if t.else_:
-            self._fill("else")
-            self._enter()
-            self._dispatch(t.else_)
-            self._leave()
-
-    def _Tuple(self, t):
-
-        if not t.nodes:
-            # Empty tuple.
-            self._write("()")
-        else:
-            self._write("(")
-
-            # _write each elements, separated by a comma.
-            for element in t.nodes[:-1]:
-                self._dispatch(element)
-                self._write(", ")
-
-            # Handle the last one without writing comma
-            last_element = t.nodes[-1]
-            self._dispatch(last_element)
-
-            self._write(")")
-            
-    def _UnaryAdd(self, t):
-        self._write("+")
-        self._dispatch(t.expr)
-        
-    def _UnarySub(self, t):
-        self._write("-")
-        self._dispatch(t.expr)        
-
-    def _With(self, t):
-        self._fill('with ')
-        self._dispatch(t.expr)
-        if t.vars:
-            self._write(' as ')
-            self._dispatch(t.vars.name)
-        self._enter()
-        self._dispatch(t.body)
-        self._leave()
-        self._write('\n')
-        
-    def _int(self, t):
-        self._write(repr(t))
-
-    def __binary_op(self, t, symbol):
-        # Check if parenthesis are needed on left side and then dispatch
-        has_paren = False
-        left_class = str(t.left.__class__)
-        if (left_class in op_precedence.keys() and
-            op_precedence[left_class] < op_precedence[str(t.__class__)]):
-            has_paren = True
-        if has_paren:
-            self._write('(')
-        self._dispatch(t.left)
-        if has_paren:
-            self._write(')')
-        # Write the appropriate symbol for operator
-        self._write(symbol)
-        # Check if parenthesis are needed on the right side and then dispatch
-        has_paren = False
-        right_class = str(t.right.__class__)
-        if (right_class in op_precedence.keys() and
-            op_precedence[right_class] < op_precedence[str(t.__class__)]):
-            has_paren = True
-        if has_paren:
-            self._write('(')
-        self._dispatch(t.right)
-        if has_paren:
-            self._write(')')
-
-    def _float(self, t):
-        # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
-        # We prefer str here.
-        self._write(str(t))
-
-    def _str(self, t):
-        self._write(repr(t))
-        
-    def _tuple(self, t):
-        self._write(str(t))
-
-    #########################################################################
-    # These are the methods from the _ast modules unparse.
-    #
-    # As our needs to handle more advanced code increase, we may want to
-    # modify some of the methods below so that they work for compiler.ast.
-    #########################################################################
-
-#    # stmt
-#    def _Expr(self, tree):
-#        self._fill()
-#        self._dispatch(tree.value)
-#
-#    def _Import(self, t):
-#        self._fill("import ")
-#        first = True
-#        for a in t.names:
-#            if first:
-#                first = False
-#            else:
-#                self._write(", ")
-#            self._write(a.name)
-#            if a.asname:
-#                self._write(" as "+a.asname)
-#
-##    def _ImportFrom(self, t):
-##        self._fill("from ")
-##        self._write(t.module)
-##        self._write(" import ")
-##        for i, a in enumerate(t.names):
-##            if i == 0:
-##                self._write(", ")
-##            self._write(a.name)
-##            if a.asname:
-##                self._write(" as "+a.asname)
-##        # XXX(jpe) what is level for?
-##
-#
-#    def _Break(self, t):
-#        self._fill("break")
-#
-#    def _Continue(self, t):
-#        self._fill("continue")
-#
-#    def _Delete(self, t):
-#        self._fill("del ")
-#        self._dispatch(t.targets)
-#
-#    def _Assert(self, t):
-#        self._fill("assert ")
-#        self._dispatch(t.test)
-#        if t.msg:
-#            self._write(", ")
-#            self._dispatch(t.msg)
-#
-#    def _Exec(self, t):
-#        self._fill("exec ")
-#        self._dispatch(t.body)
-#        if t.globals:
-#            self._write(" in ")
-#            self._dispatch(t.globals)
-#        if t.locals:
-#            self._write(", ")
-#            self._dispatch(t.locals)
-#
-#    def _Print(self, t):
-#        self._fill("print ")
-#        do_comma = False
-#        if t.dest:
-#            self._write(">>")
-#            self._dispatch(t.dest)
-#            do_comma = True
-#        for e in t.values:
-#            if do_comma:self._write(", ")
-#            else:do_comma=True
-#            self._dispatch(e)
-#        if not t.nl:
-#            self._write(",")
-#
-#    def _Global(self, t):
-#        self._fill("global")
-#        for i, n in enumerate(t.names):
-#            if i != 0:
-#                self._write(",")
-#            self._write(" " + n)
-#
-#    def _Yield(self, t):
-#        self._fill("yield")
-#        if t.value:
-#            self._write(" (")
-#            self._dispatch(t.value)
-#            self._write(")")
-#
-#    def _Raise(self, t):
-#        self._fill('raise ')
-#        if t.type:
-#            self._dispatch(t.type)
-#        if t.inst:
-#            self._write(", ")
-#            self._dispatch(t.inst)
-#        if t.tback:
-#            self._write(", ")
-#            self._dispatch(t.tback)
-#
-#
-#    def _TryFinally(self, t):
-#        self._fill("try")
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#        self._fill("finally")
-#        self._enter()
-#        self._dispatch(t.finalbody)
-#        self._leave()
-#
-#    def _excepthandler(self, t):
-#        self._fill("except ")
-#        if t.type:
-#            self._dispatch(t.type)
-#        if t.name:
-#            self._write(", ")
-#            self._dispatch(t.name)
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#    def _ClassDef(self, t):
-#        self._write("\n")
-#        self._fill("class "+t.name)
-#        if t.bases:
-#            self._write("(")
-#            for a in t.bases:
-#                self._dispatch(a)
-#                self._write(", ")
-#            self._write(")")
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#    def _FunctionDef(self, t):
-#        self._write("\n")
-#        for deco in t.decorators:
-#            self._fill("@")
-#            self._dispatch(deco)
-#        self._fill("def "+t.name + "(")
-#        self._dispatch(t.args)
-#        self._write(")")
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#    def _For(self, t):
-#        self._fill("for ")
-#        self._dispatch(t.target)
-#        self._write(" in ")
-#        self._dispatch(t.iter)
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#        if t.orelse:
-#            self._fill("else")
-#            self._enter()
-#            self._dispatch(t.orelse)
-#            self._leave
-#
-#    def _While(self, t):
-#        self._fill("while ")
-#        self._dispatch(t.test)
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#        if t.orelse:
-#            self._fill("else")
-#            self._enter()
-#            self._dispatch(t.orelse)
-#            self._leave
-#
-#    # expr
-#    def _Str(self, tree):
-#        self._write(repr(tree.s))
-##
-#    def _Repr(self, t):
-#        self._write("`")
-#        self._dispatch(t.value)
-#        self._write("`")
-#
-#    def _Num(self, t):
-#        self._write(repr(t.n))
-#
-#    def _ListComp(self, t):
-#        self._write("[")
-#        self._dispatch(t.elt)
-#        for gen in t.generators:
-#            self._dispatch(gen)
-#        self._write("]")
-#
-#    def _GeneratorExp(self, t):
-#        self._write("(")
-#        self._dispatch(t.elt)
-#        for gen in t.generators:
-#            self._dispatch(gen)
-#        self._write(")")
-#
-#    def _comprehension(self, t):
-#        self._write(" for ")
-#        self._dispatch(t.target)
-#        self._write(" in ")
-#        self._dispatch(t.iter)
-#        for if_clause in t.ifs:
-#            self._write(" if ")
-#            self._dispatch(if_clause)
-#
-#    def _IfExp(self, t):
-#        self._dispatch(t.body)
-#        self._write(" if ")
-#        self._dispatch(t.test)
-#        if t.orelse:
-#            self._write(" else ")
-#            self._dispatch(t.orelse)
-#
-#    unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
-#    def _UnaryOp(self, t):
-#        self._write(self.unop[t.op.__class__.__name__])
-#        self._write("(")
-#        self._dispatch(t.operand)
-#        self._write(")")
-#
-#    binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
-#                    "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
-#                    "FloorDiv":"//", "Pow": "**"}
-#    def _BinOp(self, t):
-#        self._write("(")
-#        self._dispatch(t.left)
-#        self._write(")" + self.binop[t.op.__class__.__name__] + "(")
-#        self._dispatch(t.right)
-#        self._write(")")
-#
-#    boolops = {_ast.And: 'and', _ast.Or: 'or'}
-#    def _BoolOp(self, t):
-#        self._write("(")
-#        self._dispatch(t.values[0])
-#        for v in t.values[1:]:
-#            self._write(" %s " % self.boolops[t.op.__class__])
-#            self._dispatch(v)
-#        self._write(")")
-#
-#    def _Attribute(self,t):
-#        self._dispatch(t.value)
-#        self._write(".")
-#        self._write(t.attr)
-#
-##    def _Call(self, t):
-##        self._dispatch(t.func)
-##        self._write("(")
-##        comma = False
-##        for e in t.args:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._dispatch(e)
-##        for e in t.keywords:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._dispatch(e)
-##        if t.starargs:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._write("*")
-##            self._dispatch(t.starargs)
-##        if t.kwargs:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._write("**")
-##            self._dispatch(t.kwargs)
-##        self._write(")")
-#
-#    # slice
-#    def _Index(self, t):
-#        self._dispatch(t.value)
-#
-#    def _ExtSlice(self, t):
-#        for i, d in enumerate(t.dims):
-#            if i != 0:
-#                self._write(': ')
-#            self._dispatch(d)
-#
-#    # others
-#    def _arguments(self, t):
-#        first = True
-#        nonDef = len(t.args)-len(t.defaults)
-#        for a in t.args[0:nonDef]:
-#            if first:first = False
-#            else: self._write(", ")
-#            self._dispatch(a)
-#        for a,d in zip(t.args[nonDef:], t.defaults):
-#            if first:first = False
-#            else: self._write(", ")
-#            self._dispatch(a),
-#            self._write("=")
-#            self._dispatch(d)
-#        if t.vararg:
-#            if first:first = False
-#            else: self._write(", ")
-#            self._write("*"+t.vararg)
-#        if t.kwarg:
-#            if first:first = False
-#            else: self._write(", ")
-#            self._write("**"+t.kwarg)
-#
-##    def _keyword(self, t):
-##        self._write(t.arg)
-##        self._write("=")
-##        self._dispatch(t.value)
-#
-#    def _Lambda(self, t):
-#        self._write("lambda ")
-#        self._dispatch(t.args)
-#        self._write(": ")
-#        self._dispatch(t.body)
-
-
-
diff --git a/doc/sphinxext/numpydoc/numpydoc/docscrape.py b/doc/sphinxext/numpydoc/numpydoc/docscrape.py
deleted file mode 100644
index b31d06d..0000000
--- a/doc/sphinxext/numpydoc/numpydoc/docscrape.py
+++ /dev/null
@@ -1,525 +0,0 @@
-"""Extract reference documentation from the NumPy source tree.
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import inspect
-import textwrap
-import re
-import pydoc
-from warnings import warn
-import collections
-
-
-class Reader(object):
-    """A line-based string reader.
-
-    """
-    def __init__(self, data):
-        """
-        Parameters
-        ----------
-        data : str
-           String with lines separated by '\n'.
-
-        """
-        if isinstance(data,list):
-            self._str = data
-        else:
-            self._str = data.split('\n') # store string as list of lines
-
-        self.reset()
-
-    def __getitem__(self, n):
-        return self._str[n]
-
-    def reset(self):
-        self._l = 0 # current line nr
-
-    def read(self):
-        if not self.eof():
-            out = self[self._l]
-            self._l += 1
-            return out
-        else:
-            return ''
-
-    def seek_next_non_empty_line(self):
-        for l in self[self._l:]:
-            if l.strip():
-                break
-            else:
-                self._l += 1
-
-    def eof(self):
-        return self._l >= len(self._str)
-
-    def read_to_condition(self, condition_func):
-        start = self._l
-        for line in self[start:]:
-            if condition_func(line):
-                return self[start:self._l]
-            self._l += 1
-            if self.eof():
-                return self[start:self._l+1]
-        return []
-
-    def read_to_next_empty_line(self):
-        self.seek_next_non_empty_line()
-        def is_empty(line):
-            return not line.strip()
-        return self.read_to_condition(is_empty)
-
-    def read_to_next_unindented_line(self):
-        def is_unindented(line):
-            return (line.strip() and (len(line.lstrip()) == len(line)))
-        return self.read_to_condition(is_unindented)
-
-    def peek(self,n=0):
-        if self._l + n < len(self._str):
-            return self[self._l + n]
-        else:
-            return ''
-
-    def is_empty(self):
-        return not ''.join(self._str).strip()
-
-
-class NumpyDocString(object):
-    def __init__(self, docstring, config={}):
-        docstring = textwrap.dedent(docstring).split('\n')
-
-        self._doc = Reader(docstring)
-        self._parsed_data = {
-            'Signature': '',
-            'Summary': [''],
-            'Extended Summary': [],
-            'Parameters': [],
-            'Returns': [],
-            'Raises': [],
-            'Warns': [],
-            'Other Parameters': [],
-            'Attributes': [],
-            'Methods': [],
-            'See Also': [],
-            'Notes': [],
-            'Warnings': [],
-            'References': '',
-            'Examples': '',
-            'index': {}
-            }
-
-        self._parse()
-
-    def __getitem__(self,key):
-        return self._parsed_data[key]
-
-    def __setitem__(self,key,val):
-        if key not in self._parsed_data:
-            warn("Unknown section %s" % key)
-        else:
-            self._parsed_data[key] = val
-
-    def _is_at_section(self):
-        self._doc.seek_next_non_empty_line()
-
-        if self._doc.eof():
-            return False
-
-        l1 = self._doc.peek().strip()  # e.g. Parameters
-
-        if l1.startswith('.. index::'):
-            return True
-
-        l2 = self._doc.peek(1).strip() #    ---------- or ==========
-        return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
-
-    def _strip(self,doc):
-        i = 0
-        j = 0
-        for i,line in enumerate(doc):
-            if line.strip(): break
-
-        for j,line in enumerate(doc[::-1]):
-            if line.strip(): break
-
-        return doc[i:len(doc)-j]
-
-    def _read_to_next_section(self):
-        section = self._doc.read_to_next_empty_line()
-
-        while not self._is_at_section() and not self._doc.eof():
-            if not self._doc.peek(-1).strip(): # previous line was empty
-                section += ['']
-
-            section += self._doc.read_to_next_empty_line()
-
-        return section
-
-    def _read_sections(self):
-        while not self._doc.eof():
-            data = self._read_to_next_section()
-            name = data[0].strip()
-
-            if name.startswith('..'): # index section
-                yield name, data[1:]
-            elif len(data) < 2:
-                yield StopIteration
-            else:
-                yield name, self._strip(data[2:])
-
-    def _parse_param_list(self,content):
-        r = Reader(content)
-        params = []
-        while not r.eof():
-            header = r.read().strip()
-            if ' : ' in header:
-                arg_name, arg_type = header.split(' : ')[:2]
-            else:
-                arg_name, arg_type = header, ''
-
-            desc = r.read_to_next_unindented_line()
-            desc = dedent_lines(desc)
-
-            params.append((arg_name,arg_type,desc))
-
-        return params
-
-
-    _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
-                           r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
-    def _parse_see_also(self, content):
-        """
-        func_name : Descriptive text
-            continued text
-        another_func_name : Descriptive text
-        func_name1, func_name2, :meth:`func_name`, func_name3
-
-        """
-        items = []
-
-        def parse_item_name(text):
-            """Match ':role:`name`' or 'name'"""
-            m = self._name_rgx.match(text)
-            if m:
-                g = m.groups()
-                if g[1] is None:
-                    return g[3], None
-                else:
-                    return g[2], g[1]
-            raise ValueError("%s is not a item name" % text)
-
-        def push_item(name, rest):
-            if not name:
-                return
-            name, role = parse_item_name(name)
-            items.append((name, list(rest), role))
-            del rest[:]
-
-        current_func = None
-        rest = []
-
-        for line in content:
-            if not line.strip(): continue
-
-            m = self._name_rgx.match(line)
-            if m and line[m.end():].strip().startswith(':'):
-                push_item(current_func, rest)
-                current_func, line = line[:m.end()], line[m.end():]
-                rest = [line.split(':', 1)[1].strip()]
-                if not rest[0]:
-                    rest = []
-            elif not line.startswith(' '):
-                push_item(current_func, rest)
-                current_func = None
-                if ',' in line:
-                    for func in line.split(','):
-                        if func.strip():
-                            push_item(func, [])
-                elif line.strip():
-                    current_func = line
-            elif current_func is not None:
-                rest.append(line.strip())
-        push_item(current_func, rest)
-        return items
-
-    def _parse_index(self, section, content):
-        """
-        .. index: default
-           :refguide: something, else, and more
-
-        """
-        def strip_each_in(lst):
-            return [s.strip() for s in lst]
-
-        out = {}
-        section = section.split('::')
-        if len(section) > 1:
-            out['default'] = strip_each_in(section[1].split(','))[0]
-        for line in content:
-            line = line.split(':')
-            if len(line) > 2:
-                out[line[1]] = strip_each_in(line[2].split(','))
-        return out
-
-    def _parse_summary(self):
-        """Grab signature (if given) and summary"""
-        if self._is_at_section():
-            return
-
-        # If several signatures present, take the last one
-        while True:
-            summary = self._doc.read_to_next_empty_line()
-            summary_str = " ".join([s.strip() for s in summary]).strip()
-            if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
-                self['Signature'] = summary_str
-                if not self._is_at_section():
-                    continue
-            break
-
-        if summary is not None:
-            self['Summary'] = summary
-
-        if not self._is_at_section():
-            self['Extended Summary'] = self._read_to_next_section()
-
-    def _parse(self):
-        self._doc.reset()
-        self._parse_summary()
-
-        for (section,content) in self._read_sections():
-            if not section.startswith('..'):
-                section = ' '.join([s.capitalize() for s in section.split(' ')])
-            if section in ('Parameters', 'Returns', 'Raises', 'Warns',
-                           'Other Parameters', 'Attributes', 'Methods'):
-                self[section] = self._parse_param_list(content)
-            elif section.startswith('.. index::'):
-                self['index'] = self._parse_index(section, content)
-            elif section == 'See Also':
-                self['See Also'] = self._parse_see_also(content)
-            else:
-                self[section] = content
-
-    # string conversion routines
-
-    def _str_header(self, name, symbol='-'):
-        return [name, len(name)*symbol]
-
-    def _str_indent(self, doc, indent=4):
-        out = []
-        for line in doc:
-            out += [' '*indent + line]
-        return out
-
-    def _str_signature(self):
-        if self['Signature']:
-            return [self['Signature'].replace('*','\*')] + ['']
-        else:
-            return ['']
-
-    def _str_summary(self):
-        if self['Summary']:
-            return self['Summary'] + ['']
-        else:
-            return []
-
-    def _str_extended_summary(self):
-        if self['Extended Summary']:
-            return self['Extended Summary'] + ['']
-        else:
-            return []
-
-    def _str_param_list(self, name):
-        out = []
-        if self[name]:
-            out += self._str_header(name)
-            for param,param_type,desc in self[name]:
-                if param_type:
-                    out += ['%s : %s' % (param, param_type)]
-                else:
-                    out += [param]
-                out += self._str_indent(desc)
-            out += ['']
-        return out
-
-    def _str_section(self, name):
-        out = []
-        if self[name]:
-            out += self._str_header(name)
-            out += self[name]
-            out += ['']
-        return out
-
-    def _str_see_also(self, func_role):
-        if not self['See Also']: return []
-        out = []
-        out += self._str_header("See Also")
-        last_had_desc = True
-        for func, desc, role in self['See Also']:
-            if role:
-                link = ':%s:`%s`' % (role, func)
-            elif func_role:
-                link = ':%s:`%s`' % (func_role, func)
-            else:
-                link = "`%s`_" % func
-            if desc or last_had_desc:
-                out += ['']
-                out += [link]
-            else:
-                out[-1] += ", %s" % link
-            if desc:
-                out += self._str_indent([' '.join(desc)])
-                last_had_desc = True
-            else:
-                last_had_desc = False
-        out += ['']
-        return out
-
-    def _str_index(self):
-        idx = self['index']
-        out = []
-        out += ['.. index:: %s' % idx.get('default','')]
-        for section, references in idx.items():
-            if section == 'default':
-                continue
-            out += ['   :%s: %s' % (section, ', '.join(references))]
-        return out
-
-    def __str__(self, func_role=''):
-        out = []
-        out += self._str_signature()
-        out += self._str_summary()
-        out += self._str_extended_summary()
-        for param_list in ('Parameters', 'Returns', 'Other Parameters',
-                           'Raises', 'Warns'):
-            out += self._str_param_list(param_list)
-        out += self._str_section('Warnings')
-        out += self._str_see_also(func_role)
-        for s in ('Notes','References','Examples'):
-            out += self._str_section(s)
-        for param_list in ('Attributes', 'Methods'):
-            out += self._str_param_list(param_list)
-        out += self._str_index()
-        return '\n'.join(out)
-
-
-def indent(str,indent=4):
-    indent_str = ' '*indent
-    if str is None:
-        return indent_str
-    lines = str.split('\n')
-    return '\n'.join(indent_str + l for l in lines)
-
-def dedent_lines(lines):
-    """Deindent a list of lines maximally"""
-    return textwrap.dedent("\n".join(lines)).split("\n")
-
-def header(text, style='-'):
-    return text + '\n' + style*len(text) + '\n'
-
-
-class FunctionDoc(NumpyDocString):
-    def __init__(self, func, role='func', doc=None, config={}):
-        self._f = func
-        self._role = role # e.g. "func" or "meth"
-
-        if doc is None:
-            if func is None:
-                raise ValueError("No function or docstring given")
-            doc = inspect.getdoc(func) or ''
-        NumpyDocString.__init__(self, doc)
-
-        if not self['Signature'] and func is not None:
-            func, func_name = self.get_func()
-            try:
-                # try to read signature
-                argspec = inspect.getargspec(func)
-                argspec = inspect.formatargspec(*argspec)
-                argspec = argspec.replace('*','\*')
-                signature = '%s%s' % (func_name, argspec)
-            except TypeError as e:
-                signature = '%s()' % func_name
-            self['Signature'] = signature
-
-    def get_func(self):
-        func_name = getattr(self._f, '__name__', self.__class__.__name__)
-        if inspect.isclass(self._f):
-            func = getattr(self._f, '__call__', self._f.__init__)
-        else:
-            func = self._f
-        return func, func_name
-
-    def __str__(self):
-        out = ''
-
-        func, func_name = self.get_func()
-        signature = self['Signature'].replace('*', '\*')
-
-        roles = {'func': 'function',
-                 'meth': 'method'}
-
-        if self._role:
-            if self._role not in roles:
-                print("Warning: invalid role %s" % self._role)
-            out += '.. %s:: %s\n    \n\n' % (roles.get(self._role,''),
-                                             func_name)
-
-        out += super(FunctionDoc, self).__str__(func_role=self._role)
-        return out
-
-
-class ClassDoc(NumpyDocString):
-
-    def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
-                 config={}):
-        if not inspect.isclass(cls) and cls is not None:
-            raise ValueError("Expected a class or None, but got %r" % cls)
-        self._cls = cls
-
-        if modulename and not modulename.endswith('.'):
-            modulename += '.'
-        self._mod = modulename
-
-        if doc is None:
-            if cls is None:
-                raise ValueError("No class or documentation string given")
-            doc = pydoc.getdoc(cls)
-
-        NumpyDocString.__init__(self, doc)
-
-        if config.get('show_class_members', True):
-            def splitlines_x(s):
-                if not s:
-                    return []
-                else:
-                    return s.splitlines()
-
-            for field, items in [('Methods', self.methods),
-                                 ('Attributes', self.properties)]:
-                if not self[field]:
-                    doc_list = []
-                    for name in sorted(items):
-                         try:
-                            doc_item = pydoc.getdoc(getattr(self._cls, name))
-                            doc_list.append((name, '', splitlines_x(doc_item)))
-                         except AttributeError:
-                            pass # method doesn't exist
-                    self[field] = doc_list
-
-    @property
-    def methods(self):
-        if self._cls is None:
-            return []
-        return [name for name,func in inspect.getmembers(self._cls)
-                if ((not name.startswith('_') or
-                     '.. shownumpydoc' in pydoc.getdoc(func))
-                    and isinstance(func, collections.Callable))]
-
-    @property
-    def properties(self):
-        if self._cls is None:
-            return []
-        return [name for name,func in inspect.getmembers(self._cls)
-                if not name.startswith('_') and
-                (func is None or isinstance(func, property) or
-                 inspect.isgetsetdescriptor(func))]
diff --git a/doc/sphinxext/numpydoc/numpydoc/docscrape_sphinx.py b/doc/sphinxext/numpydoc/numpydoc/docscrape_sphinx.py
deleted file mode 100644
index cdc2a37..0000000
--- a/doc/sphinxext/numpydoc/numpydoc/docscrape_sphinx.py
+++ /dev/null
@@ -1,274 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-import sys, re, inspect, textwrap, pydoc
-import sphinx
-import collections
-from .docscrape import NumpyDocString, FunctionDoc, ClassDoc
-
-if sys.version_info[0] >= 3:
-    sixu = lambda s: s
-else:
-    sixu = lambda s: unicode(s, 'unicode_escape')
-
-
-class SphinxDocString(NumpyDocString):
-    def __init__(self, docstring, config={}):
-        NumpyDocString.__init__(self, docstring, config=config)
-        self.load_config(config)
-
-    def load_config(self, config):
-        self.use_plots = config.get('use_plots', False)
-        self.class_members_toctree = config.get('class_members_toctree', True)
-
-    # string conversion routines
-    def _str_header(self, name, symbol='`'):
-        return ['.. rubric:: ' + name, '']
-
-    def _str_field_list(self, name):
-        return [':' + name + ':']
-
-    def _str_indent(self, doc, indent=4):
-        out = []
-        for line in doc:
-            out += [' '*indent + line]
-        return out
-
-    def _str_signature(self):
-        return ['']
-        if self['Signature']:
-            return ['``%s``' % self['Signature']] + ['']
-        else:
-            return ['']
-
-    def _str_summary(self):
-        return self['Summary'] + ['']
-
-    def _str_extended_summary(self):
-        return self['Extended Summary'] + ['']
-
-    def _str_returns(self):
-        out = []
-        if self['Returns']:
-            out += self._str_field_list('Returns')
-            out += ['']
-            for param, param_type, desc in self['Returns']:
-                if param_type:
-                    out += self._str_indent(['**%s** : %s' % (param.strip(),
-                                                              param_type)])
-                else:
-                    out += self._str_indent([param.strip()])
-                if desc:
-                    out += ['']
-                    out += self._str_indent(desc, 8)
-                out += ['']
-        return out
-
-    def _str_param_list(self, name):
-        out = []
-        if self[name]:
-            out += self._str_field_list(name)
-            out += ['']
-            for param, param_type, desc in self[name]:
-                if param_type:
-                    out += self._str_indent(['**%s** : %s' % (param.strip(),
-                                                              param_type)])
-                else:
-                    out += self._str_indent(['**%s**' % param.strip()])
-                if desc:
-                    out += ['']
-                    out += self._str_indent(desc, 8)
-                out += ['']
-        return out
-
-    @property
-    def _obj(self):
-        if hasattr(self, '_cls'):
-            return self._cls
-        elif hasattr(self, '_f'):
-            return self._f
-        return None
-
-    def _str_member_list(self, name):
-        """
-        Generate a member listing, autosummary:: table where possible,
-        and a table where not.
-
-        """
-        out = []
-        if self[name]:
-            out += ['.. rubric:: %s' % name, '']
-            prefix = getattr(self, '_name', '')
-
-            if prefix:
-                prefix = '~%s.' % prefix
-
-            autosum = []
-            others = []
-            for param, param_type, desc in self[name]:
-                param = param.strip()
-
-                # Check if the referenced member can have a docstring or not
-                param_obj = getattr(self._obj, param, None)
-                if not (callable(param_obj)
-                        or isinstance(param_obj, property)
-                        or inspect.isgetsetdescriptor(param_obj)):
-                    param_obj = None
-
-                if param_obj and (pydoc.getdoc(param_obj) or not desc):
-                    # Referenced object has a docstring
-                    autosum += ["   %s%s" % (prefix, param)]
-                else:
-                    others.append((param, param_type, desc))
-
-            if autosum:
-                out += ['.. autosummary::']
-                if self.class_members_toctree:
-                    out += ['   :toctree:']
-                out += [''] + autosum
-
-            if others:
-                maxlen_0 = max(3, max([len(x[0]) for x in others]))
-                hdr = sixu("=")*maxlen_0 + sixu("  ") + sixu("=")*10
-                fmt = sixu('%%%ds  %%s  ') % (maxlen_0,)
-                out += ['', hdr]
-                for param, param_type, desc in others:
-                    desc = sixu(" ").join(x.strip() for x in desc).strip()
-                    if param_type:
-                        desc = "(%s) %s" % (param_type, desc)
-                    out += [fmt % (param.strip(), desc)]
-                out += [hdr]
-            out += ['']
-        return out
-
-    def _str_section(self, name):
-        out = []
-        if self[name]:
-            out += self._str_header(name)
-            out += ['']
-            content = textwrap.dedent("\n".join(self[name])).split("\n")
-            out += content
-            out += ['']
-        return out
-
-    def _str_see_also(self, func_role):
-        out = []
-        if self['See Also']:
-            see_also = super(SphinxDocString, self)._str_see_also(func_role)
-            out = ['.. seealso::', '']
-            out += self._str_indent(see_also[2:])
-        return out
-
-    def _str_warnings(self):
-        out = []
-        if self['Warnings']:
-            out = ['.. warning::', '']
-            out += self._str_indent(self['Warnings'])
-        return out
-
-    def _str_index(self):
-        idx = self['index']
-        out = []
-        if len(idx) == 0:
-            return out
-
-        out += ['.. index:: %s' % idx.get('default','')]
-        for section, references in idx.items():
-            if section == 'default':
-                continue
-            elif section == 'refguide':
-                out += ['   single: %s' % (', '.join(references))]
-            else:
-                out += ['   %s: %s' % (section, ','.join(references))]
-        return out
-
-    def _str_references(self):
-        out = []
-        if self['References']:
-            out += self._str_header('References')
-            if isinstance(self['References'], str):
-                self['References'] = [self['References']]
-            out.extend(self['References'])
-            out += ['']
-            # Latex collects all references to a separate bibliography,
-            # so we need to insert links to it
-            if sphinx.__version__ >= "0.6":
-                out += ['.. only:: latex','']
-            else:
-                out += ['.. latexonly::','']
-            items = []
-            for line in self['References']:
-                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
-                if m:
-                    items.append(m.group(1))
-            out += ['   ' + ", ".join(["[%s]_" % item for item in items]), '']
-        return out
-
-    def _str_examples(self):
-        examples_str = "\n".join(self['Examples'])
-
-        if (self.use_plots and 'import matplotlib' in examples_str
-                and 'plot::' not in examples_str):
-            out = []
-            out += self._str_header('Examples')
-            out += ['.. plot::', '']
-            out += self._str_indent(self['Examples'])
-            out += ['']
-            return out
-        else:
-            return self._str_section('Examples')
-
-    def __str__(self, indent=0, func_role="obj"):
-        out = []
-        out += self._str_signature()
-        out += self._str_index() + ['']
-        out += self._str_summary()
-        out += self._str_extended_summary()
-        out += self._str_param_list('Parameters')
-        out += self._str_returns()
-        for param_list in ('Other Parameters', 'Raises', 'Warns'):
-            out += self._str_param_list(param_list)
-        out += self._str_warnings()
-        out += self._str_see_also(func_role)
-        out += self._str_section('Notes')
-        out += self._str_references()
-        out += self._str_examples()
-        for param_list in ('Attributes', 'Methods'):
-            out += self._str_member_list(param_list)
-        out = self._str_indent(out,indent)
-        return '\n'.join(out)
-
-class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
-    def __init__(self, obj, doc=None, config={}):
-        self.load_config(config)
-        FunctionDoc.__init__(self, obj, doc=doc, config=config)
-
-class SphinxClassDoc(SphinxDocString, ClassDoc):
-    def __init__(self, obj, doc=None, func_doc=None, config={}):
-        self.load_config(config)
-        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
-
-class SphinxObjDoc(SphinxDocString):
-    def __init__(self, obj, doc=None, config={}):
-        self._f = obj
-        self.load_config(config)
-        SphinxDocString.__init__(self, doc, config=config)
-
-def get_doc_object(obj, what=None, doc=None, config={}):
-    if what is None:
-        if inspect.isclass(obj):
-            what = 'class'
-        elif inspect.ismodule(obj):
-            what = 'module'
-        elif isinstance(obj, collections.Callable):
-            what = 'function'
-        else:
-            what = 'object'
-    if what == 'class':
-        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
-                              config=config)
-    elif what in ('function', 'method'):
-        return SphinxFunctionDoc(obj, doc=doc, config=config)
-    else:
-        if doc is None:
-            doc = pydoc.getdoc(obj)
-        return SphinxObjDoc(obj, doc, config=config)
diff --git a/doc/sphinxext/numpydoc/numpydoc/linkcode.py b/doc/sphinxext/numpydoc/numpydoc/linkcode.py
deleted file mode 100644
index 1ad3ab8..0000000
--- a/doc/sphinxext/numpydoc/numpydoc/linkcode.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    linkcode
-    ~~~~~~~~
-
-    Add external links to module code in Python object descriptions.
-
-    :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
-    :license: BSD, see LICENSE for details.
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import warnings
-import collections
-
-warnings.warn("This extension has been accepted to Sphinx upstream. "
-              "Use the version from there (Sphinx >= 1.2) "
-              "https://bitbucket.org/birkenfeld/sphinx/pull-request/47/sphinxextlinkcode",
-              FutureWarning, stacklevel=1)
-
-
-from docutils import nodes
-
-from sphinx import addnodes
-from sphinx.locale import _
-from sphinx.errors import SphinxError
-
-class LinkcodeError(SphinxError):
-    category = "linkcode error"
-
-def doctree_read(app, doctree):
-    env = app.builder.env
-
-    resolve_target = getattr(env.config, 'linkcode_resolve', None)
-    if not isinstance(env.config.linkcode_resolve, collections.Callable):
-        raise LinkcodeError(
-            "Function `linkcode_resolve` is not given in conf.py")
-
-    domain_keys = dict(
-        py=['module', 'fullname'],
-        c=['names'],
-        cpp=['names'],
-        js=['object', 'fullname'],
-    )
-
-    for objnode in doctree.traverse(addnodes.desc):
-        domain = objnode.get('domain')
-        uris = set()
-        for signode in objnode:
-            if not isinstance(signode, addnodes.desc_signature):
-                continue
-
-            # Convert signode to a specified format
-            info = {}
-            for key in domain_keys.get(domain, []):
-                value = signode.get(key)
-                if not value:
-                    value = ''
-                info[key] = value
-            if not info:
-                continue
-
-            # Call user code to resolve the link
-            uri = resolve_target(domain, info)
-            if not uri:
-                # no source
-                continue
-
-            if uri in uris or not uri:
-                # only one link per name, please
-                continue
-            uris.add(uri)
-
-            onlynode = addnodes.only(expr='html')
-            onlynode += nodes.reference('', '', internal=False, refuri=uri)
-            onlynode[0] += nodes.inline('', _('[source]'),
-                                        classes=['viewcode-link'])
-            signode += onlynode
-
-def setup(app):
-    app.connect('doctree-read', doctree_read)
-    app.add_config_value('linkcode_resolve', None, '')
diff --git a/doc/sphinxext/numpydoc/numpydoc/numpydoc.py b/doc/sphinxext/numpydoc/numpydoc/numpydoc.py
deleted file mode 100644
index 2bc2d1e..0000000
--- a/doc/sphinxext/numpydoc/numpydoc/numpydoc.py
+++ /dev/null
@@ -1,187 +0,0 @@
-"""
-========
-numpydoc
-========
-
-Sphinx extension that handles docstrings in the Numpy standard format. [1]
-
-It will:
-
-- Convert Parameters etc. sections to field lists.
-- Convert See Also section to a See also entry.
-- Renumber references.
-- Extract the signature from the docstring, if it can't be determined otherwise.
-
-.. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import os, sys, re, pydoc
-import sphinx
-import inspect
-import collections
-
-if sphinx.__version__ < '1.0.1':
-    raise RuntimeError("Sphinx 1.0.1 or newer is required")
-
-from .docscrape_sphinx import get_doc_object, SphinxDocString
-from sphinx.util.compat import Directive
-
-if sys.version_info[0] >= 3:
-    sixu = lambda s: s
-else:
-    sixu = lambda s: unicode(s, 'unicode_escape')
-
-
-def mangle_docstrings(app, what, name, obj, options, lines,
-                      reference_offset=[0]):
-
-    cfg = dict(use_plots=app.config.numpydoc_use_plots,
-               show_class_members=app.config.numpydoc_show_class_members,
-               class_members_toctree=app.config.numpydoc_class_members_toctree,
-              )
-
-    if what == 'module':
-        # Strip top title
-        title_re = re.compile(sixu('^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*'),
-                              re.I|re.S)
-        lines[:] = title_re.sub(sixu(''), sixu("\n").join(lines)).split(sixu("\n"))
-    else:
-        doc = get_doc_object(obj, what, sixu("\n").join(lines), config=cfg)
-        if sys.version_info[0] >= 3:
-            doc = str(doc)
-        else:
-            doc = unicode(doc)
-        lines[:] = doc.split(sixu("\n"))
-
-    if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
-           obj.__name__:
-        if hasattr(obj, '__module__'):
-            v = dict(full_name=sixu("%s.%s") % (obj.__module__, obj.__name__))
-        else:
-            v = dict(full_name=obj.__name__)
-        lines += [sixu(''), sixu('.. htmlonly::'), sixu('')]
-        lines += [sixu('    %s') % x for x in
-                  (app.config.numpydoc_edit_link % v).split("\n")]
-
-    # replace reference numbers so that there are no duplicates
-    references = []
-    for line in lines:
-        line = line.strip()
-        m = re.match(sixu('^.. \\[([a-z0-9_.-])\\]'), line, re.I)
-        if m:
-            references.append(m.group(1))
-
-    # start renaming from the longest string, to avoid overwriting parts
-    references.sort(key=lambda x: -len(x))
-    if references:
-        for i, line in enumerate(lines):
-            for r in references:
-                if re.match(sixu('^\\d+$'), r):
-                    new_r = sixu("R%d") % (reference_offset[0] + int(r))
-                else:
-                    new_r = sixu("%s%d") % (r, reference_offset[0])
-                lines[i] = lines[i].replace(sixu('[%s]_') % r,
-                                            sixu('[%s]_') % new_r)
-                lines[i] = lines[i].replace(sixu('.. [%s]') % r,
-                                            sixu('.. [%s]') % new_r)
-
-    reference_offset[0] += len(references)
-
-def mangle_signature(app, what, name, obj, options, sig, retann):
-    # Do not try to inspect classes that don't define `__init__`
-    if (inspect.isclass(obj) and
-        (not hasattr(obj, '__init__') or
-        'initializes x; see ' in pydoc.getdoc(obj.__init__))):
-        return '', ''
-
-    if not (isinstance(obj, collections.Callable) or hasattr(obj, '__argspec_is_invalid_')): return
-    if not hasattr(obj, '__doc__'): return
-
-    doc = SphinxDocString(pydoc.getdoc(obj))
-    if doc['Signature']:
-        sig = re.sub(sixu("^[^(]*"), sixu(""), doc['Signature'])
-        return sig, sixu('')
-
-def setup(app, get_doc_object_=get_doc_object):
-    if not hasattr(app, 'add_config_value'):
-        return # probably called by nose, better bail out
-
-    global get_doc_object
-    get_doc_object = get_doc_object_
-
-    app.connect('autodoc-process-docstring', mangle_docstrings)
-    app.connect('autodoc-process-signature', mangle_signature)
-    app.add_config_value('numpydoc_edit_link', None, False)
-    app.add_config_value('numpydoc_use_plots', None, False)
-    app.add_config_value('numpydoc_show_class_members', True, True)
-    app.add_config_value('numpydoc_class_members_toctree', True, True)
-
-    # Extra mangling domains
-    app.add_domain(NumpyPythonDomain)
-    app.add_domain(NumpyCDomain)
-
-#------------------------------------------------------------------------------
-# Docstring-mangling domains
-#------------------------------------------------------------------------------
-
-from docutils.statemachine import ViewList
-from sphinx.domains.c import CDomain
-from sphinx.domains.python import PythonDomain
-
-class ManglingDomainBase(object):
-    directive_mangling_map = {}
-
-    def __init__(self, *a, **kw):
-        super(ManglingDomainBase, self).__init__(*a, **kw)
-        self.wrap_mangling_directives()
-
-    def wrap_mangling_directives(self):
-        for name, objtype in list(self.directive_mangling_map.items()):
-            self.directives[name] = wrap_mangling_directive(
-                self.directives[name], objtype)
-
-class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
-    name = 'np'
-    directive_mangling_map = {
-        'function': 'function',
-        'class': 'class',
-        'exception': 'class',
-        'method': 'function',
-        'classmethod': 'function',
-        'staticmethod': 'function',
-        'attribute': 'attribute',
-    }
-    indices = []
-
-class NumpyCDomain(ManglingDomainBase, CDomain):
-    name = 'np-c'
-    directive_mangling_map = {
-        'function': 'function',
-        'member': 'attribute',
-        'macro': 'function',
-        'type': 'class',
-        'var': 'object',
-    }
-
-def wrap_mangling_directive(base_directive, objtype):
-    class directive(base_directive):
-        def run(self):
-            env = self.state.document.settings.env
-
-            name = None
-            if self.arguments:
-                m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
-                name = m.group(2).strip()
-
-            if not name:
-                name = self.arguments[0]
-
-            lines = list(self.content)
-            mangle_docstrings(env.app, objtype, name, None, None, lines)
-            self.content = ViewList(lines, self.content.parent)
-
-            return base_directive.run(self)
-
-    return directive
diff --git a/doc/sphinxext/numpydoc/numpydoc/phantom_import.py b/doc/sphinxext/numpydoc/numpydoc/phantom_import.py
deleted file mode 100644
index 9a60b4a..0000000
--- a/doc/sphinxext/numpydoc/numpydoc/phantom_import.py
+++ /dev/null
@@ -1,167 +0,0 @@
-"""
-==============
-phantom_import
-==============
-
-Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar
-extensions to use docstrings loaded from an XML file.
-
-This extension loads an XML file in the Pydocweb format [1] and
-creates a dummy module that contains the specified docstrings. This
-can be used to get the current docstrings from a Pydocweb instance
-without needing to rebuild the documented module.
-
-.. [1] http://code.google.com/p/pydocweb
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import imp, sys, compiler, types, os, inspect, re
-
-def setup(app):
-    app.connect('builder-inited', initialize)
-    app.add_config_value('phantom_import_file', None, True)
-
-def initialize(app):
-    fn = app.config.phantom_import_file
-    if (fn and os.path.isfile(fn)):
-        print("[numpydoc] Phantom importing modules from", fn, "...")
-        import_phantom_module(fn)
-
-#------------------------------------------------------------------------------
-# Creating 'phantom' modules from an XML description
-#------------------------------------------------------------------------------
-def import_phantom_module(xml_file):
-    """
-    Insert a fake Python module to sys.modules, based on a XML file.
-
-    The XML file is expected to conform to Pydocweb DTD. The fake
-    module will contain dummy objects, which guarantee the following:
-
-    - Docstrings are correct.
-    - Class inheritance relationships are correct (if present in XML).
-    - Function argspec is *NOT* correct (even if present in XML).
-      Instead, the function signature is prepended to the function docstring.
-    - Class attributes are *NOT* correct; instead, they are dummy objects.
-
-    Parameters
-    ----------
-    xml_file : str
-        Name of an XML file to read
-    
-    """
-    import lxml.etree as etree
-
-    object_cache = {}
-
-    tree = etree.parse(xml_file)
-    root = tree.getroot()
-
-    # Sort items so that
-    # - Base classes come before classes inherited from them
-    # - Modules come before their contents
-    all_nodes = dict([(n.attrib['id'], n) for n in root])
-    
-    def _get_bases(node, recurse=False):
-        bases = [x.attrib['ref'] for x in node.findall('base')]
-        if recurse:
-            j = 0
-            while True:
-                try:
-                    b = bases[j]
-                except IndexError: break
-                if b in all_nodes:
-                    bases.extend(_get_bases(all_nodes[b]))
-                j += 1
-        return bases
-
-    type_index = ['module', 'class', 'callable', 'object']
-    
-    def base_cmp(a, b):
-        x = cmp(type_index.index(a.tag), type_index.index(b.tag))
-        if x != 0: return x
-
-        if a.tag == 'class' and b.tag == 'class':
-            a_bases = _get_bases(a, recurse=True)
-            b_bases = _get_bases(b, recurse=True)
-            x = cmp(len(a_bases), len(b_bases))
-            if x != 0: return x
-            if a.attrib['id'] in b_bases: return -1
-            if b.attrib['id'] in a_bases: return 1
-        
-        return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.'))
-
-    nodes = root.getchildren()
-    nodes.sort(base_cmp)
-
-    # Create phantom items
-    for node in nodes:
-        name = node.attrib['id']
-        doc = (node.text or '').decode('string-escape') + "\n"
-        if doc == "\n": doc = ""
-
-        # create parent, if missing
-        parent = name
-        while True:
-            parent = '.'.join(parent.split('.')[:-1])
-            if not parent: break
-            if parent in object_cache: break
-            obj = imp.new_module(parent)
-            object_cache[parent] = obj
-            sys.modules[parent] = obj
-
-        # create object
-        if node.tag == 'module':
-            obj = imp.new_module(name)
-            obj.__doc__ = doc
-            sys.modules[name] = obj
-        elif node.tag == 'class':
-            bases = [object_cache[b] for b in _get_bases(node)
-                     if b in object_cache]
-            bases.append(object)
-            init = lambda self: None
-            init.__doc__ = doc
-            obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init})
-            obj.__name__ = name.split('.')[-1]
-        elif node.tag == 'callable':
-            funcname = node.attrib['id'].split('.')[-1]
-            argspec = node.attrib.get('argspec')
-            if argspec:
-                argspec = re.sub('^[^(]*', '', argspec)
-                doc = "%s%s\n\n%s" % (funcname, argspec, doc)
-            obj = lambda: 0
-            obj.__argspec_is_invalid_ = True
-            if sys.version_info[0] >= 3:
-                obj.__name__ = funcname
-            else:
-                obj.func_name = funcname
-            obj.__name__ = name
-            obj.__doc__ = doc
-            if inspect.isclass(object_cache[parent]):
-                obj.__objclass__ = object_cache[parent]
-        else:
-            class Dummy(object): pass
-            obj = Dummy()
-            obj.__name__ = name
-            obj.__doc__ = doc
-            if inspect.isclass(object_cache[parent]):
-                obj.__get__ = lambda: None
-        object_cache[name] = obj
-
-        if parent:
-            if inspect.ismodule(object_cache[parent]):
-                obj.__module__ = parent
-                setattr(object_cache[parent], name.split('.')[-1], obj)
-
-    # Populate items
-    for node in root:
-        obj = object_cache.get(node.attrib['id'])
-        if obj is None: continue
-        for ref in node.findall('ref'):
-            if node.tag == 'class':
-                if ref.attrib['ref'].startswith(node.attrib['id'] + '.'):
-                    setattr(obj, ref.attrib['name'],
-                            object_cache.get(ref.attrib['ref']))
-            else:
-                setattr(obj, ref.attrib['name'],
-                        object_cache.get(ref.attrib['ref']))
diff --git a/doc/sphinxext/numpydoc/numpydoc/plot_directive.py b/doc/sphinxext/numpydoc/numpydoc/plot_directive.py
deleted file mode 100644
index 2014f85..0000000
--- a/doc/sphinxext/numpydoc/numpydoc/plot_directive.py
+++ /dev/null
@@ -1,642 +0,0 @@
-"""
-A special directive for generating a matplotlib plot.
-
-.. warning::
-
-   This is a hacked version of plot_directive.py from Matplotlib.
-   It's very much subject to change!
-
-
-Usage
------
-
-Can be used like this::
-
-    .. plot:: examples/example.py
-
-    .. plot::
-
-       import matplotlib.pyplot as plt
-       plt.plot([1,2,3], [4,5,6])
-
-    .. plot::
-
-       A plotting example:
-
-       >>> import matplotlib.pyplot as plt
-       >>> plt.plot([1,2,3], [4,5,6])
-
-The content is interpreted as doctest formatted if it has a line starting
-with ``>>>``.
-
-The ``plot`` directive supports the options
-
-    format : {'python', 'doctest'}
-        Specify the format of the input
-
-    include-source : bool
-        Whether to display the source code. Default can be changed in conf.py
-
-and the ``image`` directive options ``alt``, ``height``, ``width``,
-``scale``, ``align``, ``class``.
-
-Configuration options
----------------------
-
-The plot directive has the following configuration options:
-
-    plot_include_source
-        Default value for the include-source option
-
-    plot_pre_code
-        Code that should be executed before each plot.
-
-    plot_basedir
-        Base directory, to which plot:: file names are relative to.
-        (If None or empty, file names are relative to the directoly where
-        the file containing the directive is.)
-
-    plot_formats
-        File formats to generate. List of tuples or strings::
-
-            [(suffix, dpi), suffix, ...]
-
-        that determine the file format and the DPI. For entries whose
-        DPI was omitted, sensible defaults are chosen.
-
-    plot_html_show_formats
-        Whether to show links to the files in HTML.
-
-TODO
-----
-
-* Refactor Latex output; now it's plain images, but it would be nice
-  to make them appear side-by-side, or in floats.
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback
-import sphinx
-
-if sys.version_info[0] >= 3:
-    from io import StringIO
-else:
-    from io import StringIO
-
-import warnings
-warnings.warn("A plot_directive module is also available under "
-              "matplotlib.sphinxext; expect this numpydoc.plot_directive "
-              "module to be deprecated after relevant features have been "
-              "integrated there.",
-              FutureWarning, stacklevel=2)
-
-
-#------------------------------------------------------------------------------
-# Registration hook
-#------------------------------------------------------------------------------
-
-def setup(app):
-    setup.app = app
-    setup.config = app.config
-    setup.confdir = app.confdir
-
-    app.add_config_value('plot_pre_code', '', True)
-    app.add_config_value('plot_include_source', False, True)
-    app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
-    app.add_config_value('plot_basedir', None, True)
-    app.add_config_value('plot_html_show_formats', True, True)
-
-    app.add_directive('plot', plot_directive, True, (0, 1, False),
-                      **plot_directive_options)
-
-#------------------------------------------------------------------------------
-# plot:: directive
-#------------------------------------------------------------------------------
-from docutils.parsers.rst import directives
-from docutils import nodes
-
-def plot_directive(name, arguments, options, content, lineno,
-                   content_offset, block_text, state, state_machine):
-    return run(arguments, content, options, state_machine, state, lineno)
-plot_directive.__doc__ = __doc__
-
-def _option_boolean(arg):
-    if not arg or not arg.strip():
-        # no argument given, assume used as a flag
-        return True
-    elif arg.strip().lower() in ('no', '0', 'false'):
-        return False
-    elif arg.strip().lower() in ('yes', '1', 'true'):
-        return True
-    else:
-        raise ValueError('"%s" unknown boolean' % arg)
-
-def _option_format(arg):
-    return directives.choice(arg, ('python', 'lisp'))
-
-def _option_align(arg):
-    return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
-                                   "right"))
-
-plot_directive_options = {'alt': directives.unchanged,
-                          'height': directives.length_or_unitless,
-                          'width': directives.length_or_percentage_or_unitless,
-                          'scale': directives.nonnegative_int,
-                          'align': _option_align,
-                          'class': directives.class_option,
-                          'include-source': _option_boolean,
-                          'format': _option_format,
-                          }
-
-#------------------------------------------------------------------------------
-# Generating output
-#------------------------------------------------------------------------------
-
-from docutils import nodes, utils
-
-try:
-    # Sphinx depends on either Jinja or Jinja2
-    import jinja2
-    def format_template(template, **kw):
-        return jinja2.Template(template).render(**kw)
-except ImportError:
-    import jinja
-    def format_template(template, **kw):
-        return jinja.from_string(template, **kw)
-
-TEMPLATE = """
-{{ source_code }}
-
-{{ only_html }}
-
-   {% if source_link or (html_show_formats and not multi_image) %}
-   (
-   {%- if source_link -%}
-   `Source code <{{ source_link }}>`__
-   {%- endif -%}
-   {%- if html_show_formats and not multi_image -%}
-     {%- for img in images -%}
-       {%- for fmt in img.formats -%}
-         {%- if source_link or not loop.first -%}, {% endif -%}
-         `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
-       {%- endfor -%}
-     {%- endfor -%}
-   {%- endif -%}
-   )
-   {% endif %}
-
-   {% for img in images %}
-   .. figure:: {{ build_dir }}/{{ img.basename }}.png
-      {%- for option in options %}
-      {{ option }}
-      {% endfor %}
-
-      {% if html_show_formats and multi_image -%}
-        (
-        {%- for fmt in img.formats -%}
-        {%- if not loop.first -%}, {% endif -%}
-        `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
-        {%- endfor -%}
-        )
-      {%- endif -%}
-   {% endfor %}
-
-{{ only_latex }}
-
-   {% for img in images %}
-   .. image:: {{ build_dir }}/{{ img.basename }}.pdf
-   {% endfor %}
-
-"""
-
-class ImageFile(object):
-    def __init__(self, basename, dirname):
-        self.basename = basename
-        self.dirname = dirname
-        self.formats = []
-
-    def filename(self, format):
-        return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
-
-    def filenames(self):
-        return [self.filename(fmt) for fmt in self.formats]
-
-def run(arguments, content, options, state_machine, state, lineno):
-    if arguments and content:
-        raise RuntimeError("plot:: directive can't have both args and content")
-
-    document = state_machine.document
-    config = document.settings.env.config
-
-    options.setdefault('include-source', config.plot_include_source)
-
-    # determine input
-    rst_file = document.attributes['source']
-    rst_dir = os.path.dirname(rst_file)
-
-    if arguments:
-        if not config.plot_basedir:
-            source_file_name = os.path.join(rst_dir,
-                                            directives.uri(arguments[0]))
-        else:
-            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
-                                            directives.uri(arguments[0]))
-        code = open(source_file_name, 'r').read()
-        output_base = os.path.basename(source_file_name)
-    else:
-        source_file_name = rst_file
-        code = textwrap.dedent("\n".join(map(str, content)))
-        counter = document.attributes.get('_plot_counter', 0) + 1
-        document.attributes['_plot_counter'] = counter
-        base, ext = os.path.splitext(os.path.basename(source_file_name))
-        output_base = '%s-%d.py' % (base, counter)
-
-    base, source_ext = os.path.splitext(output_base)
-    if source_ext in ('.py', '.rst', '.txt'):
-        output_base = base
-    else:
-        source_ext = ''
-
-    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
-    output_base = output_base.replace('.', '-')
-
-    # is it in doctest format?
-    is_doctest = contains_doctest(code)
-    if 'format' in options:
-        if options['format'] == 'python':
-            is_doctest = False
-        else:
-            is_doctest = True
-
-    # determine output directory name fragment
-    source_rel_name = relpath(source_file_name, setup.confdir)
-    source_rel_dir = os.path.dirname(source_rel_name)
-    while source_rel_dir.startswith(os.path.sep):
-        source_rel_dir = source_rel_dir[1:]
-
-    # build_dir: where to place output files (temporarily)
-    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
-                             'plot_directive',
-                             source_rel_dir)
-    if not os.path.exists(build_dir):
-        os.makedirs(build_dir)
-
-    # output_dir: final location in the builder's directory
-    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
-                                            source_rel_dir))
-
-    # how to link to files from the RST file
-    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
-                                 source_rel_dir).replace(os.path.sep, '/')
-    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
-    source_link = dest_dir_link + '/' + output_base + source_ext
-
-    # make figures
-    try:
-        results = makefig(code, source_file_name, build_dir, output_base,
-                          config)
-        errors = []
-    except PlotError as err:
-        reporter = state.memo.reporter
-        sm = reporter.system_message(
-            2, "Exception occurred in plotting %s: %s" % (output_base, err),
-            line=lineno)
-        results = [(code, [])]
-        errors = [sm]
-
-    # generate output restructuredtext
-    total_lines = []
-    for j, (code_piece, images) in enumerate(results):
-        if options['include-source']:
-            if is_doctest:
-                lines = ['']
-                lines += [row.rstrip() for row in code_piece.split('\n')]
-            else:
-                lines = ['.. code-block:: python', '']
-                lines += ['    %s' % row.rstrip()
-                          for row in code_piece.split('\n')]
-            source_code = "\n".join(lines)
-        else:
-            source_code = ""
-
-        opts = [':%s: %s' % (key, val) for key, val in list(options.items())
-                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
-
-        only_html = ".. only:: html"
-        only_latex = ".. only:: latex"
-
-        if j == 0:
-            src_link = source_link
-        else:
-            src_link = None
-
-        result = format_template(
-            TEMPLATE,
-            dest_dir=dest_dir_link,
-            build_dir=build_dir_link,
-            source_link=src_link,
-            multi_image=len(images) > 1,
-            only_html=only_html,
-            only_latex=only_latex,
-            options=opts,
-            images=images,
-            source_code=source_code,
-            html_show_formats=config.plot_html_show_formats)
-
-        total_lines.extend(result.split("\n"))
-        total_lines.extend("\n")
-
-    if total_lines:
-        state_machine.insert_input(total_lines, source=source_file_name)
-
-    # copy image files to builder's output directory
-    if not os.path.exists(dest_dir):
-        os.makedirs(dest_dir)
-
-    for code_piece, images in results:
-        for img in images:
-            for fn in img.filenames():
-                shutil.copyfile(fn, os.path.join(dest_dir,
-                                                 os.path.basename(fn)))
-
-    # copy script (if necessary)
-    if source_file_name == rst_file:
-        target_name = os.path.join(dest_dir, output_base + source_ext)
-        f = open(target_name, 'w')
-        f.write(unescape_doctest(code))
-        f.close()
-
-    return errors
-
-
-#------------------------------------------------------------------------------
-# Run code and capture figures
-#------------------------------------------------------------------------------
-
-import matplotlib
-matplotlib.use('Agg')
-import matplotlib.pyplot as plt
-import matplotlib.image as image
-from matplotlib import _pylab_helpers
-
-import exceptions
-
-def contains_doctest(text):
-    try:
-        # check if it's valid Python as-is
-        compile(text, '<string>', 'exec')
-        return False
-    except SyntaxError:
-        pass
-    r = re.compile(r'^\s*>>>', re.M)
-    m = r.search(text)
-    return bool(m)
-
-def unescape_doctest(text):
-    """
-    Extract code from a piece of text, which contains either Python code
-    or doctests.
-
-    """
-    if not contains_doctest(text):
-        return text
-
-    code = ""
-    for line in text.split("\n"):
-        m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
-        if m:
-            code += m.group(2) + "\n"
-        elif line.strip():
-            code += "# " + line.strip() + "\n"
-        else:
-            code += "\n"
-    return code
-
-def split_code_at_show(text):
-    """
-    Split code at plt.show()
-
-    """
-
-    parts = []
-    is_doctest = contains_doctest(text)
-
-    part = []
-    for line in text.split("\n"):
-        if (not is_doctest and line.strip() == 'plt.show()') or \
-               (is_doctest and line.strip() == '>>> plt.show()'):
-            part.append(line)
-            parts.append("\n".join(part))
-            part = []
-        else:
-            part.append(line)
-    if "\n".join(part).strip():
-        parts.append("\n".join(part))
-    return parts
-
-class PlotError(RuntimeError):
-    pass
-
-def run_code(code, code_path, ns=None):
-    # Change the working directory to the directory of the example, so
-    # it can get at its data files, if any.
-    pwd = os.getcwd()
-    old_sys_path = list(sys.path)
-    if code_path is not None:
-        dirname = os.path.abspath(os.path.dirname(code_path))
-        os.chdir(dirname)
-        sys.path.insert(0, dirname)
-
-    # Redirect stdout
-    stdout = sys.stdout
-    sys.stdout = StringIO()
-
-    # Reset sys.argv
-    old_sys_argv = sys.argv
-    sys.argv = [code_path]
-    
-    try:
-        try:
-            code = unescape_doctest(code)
-            if ns is None:
-                ns = {}
-            if not ns:
-                exec(setup.config.plot_pre_code, ns)
-            exec(code, ns)
-        except (Exception, SystemExit) as err:
-            raise PlotError(traceback.format_exc())
-    finally:
-        os.chdir(pwd)
-        sys.argv = old_sys_argv
-        sys.path[:] = old_sys_path
-        sys.stdout = stdout
-    return ns
-
-
-#------------------------------------------------------------------------------
-# Generating figures
-#------------------------------------------------------------------------------
-
-def out_of_date(original, derived):
-    """
-    Returns True if derivative is out-of-date wrt original,
-    both of which are full file paths.
-    """
-    return (not os.path.exists(derived)
-            or os.stat(derived).st_mtime < os.stat(original).st_mtime)
-
-
-def makefig(code, code_path, output_dir, output_base, config):
-    """
-    Run a pyplot script *code* and save the images under *output_dir*
-    with file names derived from *output_base*
-
-    """
-
-    # -- Parse format list
-    default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
-    formats = []
-    for fmt in config.plot_formats:
-        if isinstance(fmt, str):
-            formats.append((fmt, default_dpi.get(fmt, 80)))
-        elif type(fmt) in (tuple, list) and len(fmt)==2:
-            formats.append((str(fmt[0]), int(fmt[1])))
-        else:
-            raise PlotError('invalid image format "%r" in plot_formats' % fmt)
-
-    # -- Try to determine if all images already exist
-
-    code_pieces = split_code_at_show(code)
-
-    # Look for single-figure output files first
-    all_exists = True
-    img = ImageFile(output_base, output_dir)
-    for format, dpi in formats:
-        if out_of_date(code_path, img.filename(format)):
-            all_exists = False
-            break
-        img.formats.append(format)
-
-    if all_exists:
-        return [(code, [img])]
-
-    # Then look for multi-figure output files
-    results = []
-    all_exists = True
-    for i, code_piece in enumerate(code_pieces):
-        images = []
-        for j in range(1000):
-            img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
-            for format, dpi in formats:
-                if out_of_date(code_path, img.filename(format)):
-                    all_exists = False
-                    break
-                img.formats.append(format)
-
-            # assume that if we have one, we have them all
-            if not all_exists:
-                all_exists = (j > 0)
-                break
-            images.append(img)
-        if not all_exists:
-            break
-        results.append((code_piece, images))
-
-    if all_exists:
-        return results
-
-    # -- We didn't find the files, so build them
-
-    results = []
-    ns = {}
-
-    for i, code_piece in enumerate(code_pieces):
-        # Clear between runs
-        plt.close('all')
-
-        # Run code
-        run_code(code_piece, code_path, ns)
-
-        # Collect images
-        images = []
-        fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
-        for j, figman in enumerate(fig_managers):
-            if len(fig_managers) == 1 and len(code_pieces) == 1:
-                img = ImageFile(output_base, output_dir)
-            else:
-                img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
-                                output_dir)
-            images.append(img)
-            for format, dpi in formats:
-                try:
-                    figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
-                except exceptions.BaseException as err:
-                    raise PlotError(traceback.format_exc())
-                img.formats.append(format)
-
-        # Results
-        results.append((code_piece, images))
-
-    return results
-
-
-#------------------------------------------------------------------------------
-# Relative pathnames
-#------------------------------------------------------------------------------
-
-try:
-    from os.path import relpath
-except ImportError:
-    # Copied from Python 2.7
-    if 'posix' in sys.builtin_module_names:
-        def relpath(path, start=os.path.curdir):
-            """Return a relative version of a path"""
-            from os.path import sep, curdir, join, abspath, commonprefix, \
-                 pardir
-
-            if not path:
-                raise ValueError("no path specified")
-
-            start_list = abspath(start).split(sep)
-            path_list = abspath(path).split(sep)
-
-            # Work out how much of the filepath is shared by start and path.
-            i = len(commonprefix([start_list, path_list]))
-
-            rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
-            if not rel_list:
-                return curdir
-            return join(*rel_list)
-    elif 'nt' in sys.builtin_module_names:
-        def relpath(path, start=os.path.curdir):
-            """Return a relative version of a path"""
-            from os.path import sep, curdir, join, abspath, commonprefix, \
-                 pardir, splitunc
-
-            if not path:
-                raise ValueError("no path specified")
-            start_list = abspath(start).split(sep)
-            path_list = abspath(path).split(sep)
-            if start_list[0].lower() != path_list[0].lower():
-                unc_path, rest = splitunc(path)
-                unc_start, rest = splitunc(start)
-                if bool(unc_path) ^ bool(unc_start):
-                    raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
-                                                                        % (path, start))
-                else:
-                    raise ValueError("path is on drive %s, start on drive %s"
-                                                        % (path_list[0], start_list[0]))
-            # Work out how much of the filepath is shared by start and path.
-            for i in range(min(len(start_list), len(path_list))):
-                if start_list[i].lower() != path_list[i].lower():
-                    break
-            else:
-                i += 1
-
-            rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
-            if not rel_list:
-                return curdir
-            return join(*rel_list)
-    else:
-        raise RuntimeError("Unsupported platform (no relpath available!)")
diff --git a/doc/sphinxext/numpydoc/numpydoc/tests/test_docscrape.py b/doc/sphinxext/numpydoc/numpydoc/tests/test_docscrape.py
deleted file mode 100644
index b682504..0000000
--- a/doc/sphinxext/numpydoc/numpydoc/tests/test_docscrape.py
+++ /dev/null
@@ -1,767 +0,0 @@
-# -*- encoding:utf-8 -*-
-from __future__ import division, absolute_import, print_function
-
-import sys, textwrap
-
-from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
-from numpydoc.docscrape_sphinx import SphinxDocString, SphinxClassDoc
-from nose.tools import *
-
-if sys.version_info[0] >= 3:
-    sixu = lambda s: s
-else:
-    sixu = lambda s: unicode(s, 'unicode_escape')
-
-
-doc_txt = '''\
-  numpy.multivariate_normal(mean, cov, shape=None, spam=None)
-
-  Draw values from a multivariate normal distribution with specified
-  mean and covariance.
-
-  The multivariate normal or Gaussian distribution is a generalisation
-  of the one-dimensional normal distribution to higher dimensions.
-
-  Parameters
-  ----------
-  mean : (N,) ndarray
-      Mean of the N-dimensional distribution.
-
-      .. math::
-
-         (1+2+3)/3
-
-  cov : (N, N) ndarray
-      Covariance matrix of the distribution.
-  shape : tuple of ints
-      Given a shape of, for example, (m,n,k), m*n*k samples are
-      generated, and packed in an m-by-n-by-k arrangement.  Because
-      each sample is N-dimensional, the output shape is (m,n,k,N).
-
-  Returns
-  -------
-  out : ndarray
-      The drawn samples, arranged according to `shape`.  If the
-      shape given is (m,n,...), then the shape of `out` is is
-      (m,n,...,N).
-
-      In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
-      value drawn from the distribution.
-  list of str
-      This is not a real return value.  It exists to test
-      anonymous return values.
-
-  Other Parameters
-  ----------------
-  spam : parrot
-      A parrot off its mortal coil.
-
-  Raises
-  ------
-  RuntimeError
-      Some error
-
-  Warns
-  -----
-  RuntimeWarning
-      Some warning
-
-  Warnings
-  --------
-  Certain warnings apply.
-
-  Notes
-  -----
-  Instead of specifying the full covariance matrix, popular
-  approximations include:
-
-    - Spherical covariance (`cov` is a multiple of the identity matrix)
-    - Diagonal covariance (`cov` has non-negative elements only on the diagonal)
-
-  This geometrical property can be seen in two dimensions by plotting
-  generated data-points:
-
-  >>> mean = [0,0]
-  >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
-
-  >>> x,y = multivariate_normal(mean,cov,5000).T
-  >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
-
-  Note that the covariance matrix must be symmetric and non-negative
-  definite.
-
-  References
-  ----------
-  .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
-         Processes," 3rd ed., McGraw-Hill Companies, 1991
-  .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
-         2nd ed., Wiley, 2001.
-
-  See Also
-  --------
-  some, other, funcs
-  otherfunc : relationship
-
-  Examples
-  --------
-  >>> mean = (1,2)
-  >>> cov = [[1,0],[1,0]]
-  >>> x = multivariate_normal(mean,cov,(3,3))
-  >>> print x.shape
-  (3, 3, 2)
-
-  The following is probably true, given that 0.6 is roughly twice the
-  standard deviation:
-
-  >>> print list( (x[0,0,:] - mean) < 0.6 )
-  [True, True]
-
-  .. index:: random
-     :refguide: random;distributions, random;gauss
-
-  '''
-doc = NumpyDocString(doc_txt)
-
-
-def test_signature():
-    assert doc['Signature'].startswith('numpy.multivariate_normal(')
-    assert doc['Signature'].endswith('spam=None)')
-
-def test_summary():
-    assert doc['Summary'][0].startswith('Draw values')
-    assert doc['Summary'][-1].endswith('covariance.')
-
-def test_extended_summary():
-    assert doc['Extended Summary'][0].startswith('The multivariate normal')
-
-def test_parameters():
-    assert_equal(len(doc['Parameters']), 3)
-    assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape'])
-
-    arg, arg_type, desc = doc['Parameters'][1]
-    assert_equal(arg_type, '(N, N) ndarray')
-    assert desc[0].startswith('Covariance matrix')
-    assert doc['Parameters'][0][-1][-2] == '   (1+2+3)/3'
-
-def test_other_parameters():
-    assert_equal(len(doc['Other Parameters']), 1)
-    assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam'])
-    arg, arg_type, desc = doc['Other Parameters'][0]
-    assert_equal(arg_type, 'parrot')
-    assert desc[0].startswith('A parrot off its mortal coil')
-
-def test_returns():
-    assert_equal(len(doc['Returns']), 2)
-    arg, arg_type, desc = doc['Returns'][0]
-    assert_equal(arg, 'out')
-    assert_equal(arg_type, 'ndarray')
-    assert desc[0].startswith('The drawn samples')
-    assert desc[-1].endswith('distribution.')
-
-    arg, arg_type, desc = doc['Returns'][1]
-    assert_equal(arg, 'list of str')
-    assert_equal(arg_type, '')
-    assert desc[0].startswith('This is not a real')
-    assert desc[-1].endswith('anonymous return values.')
-
-def test_notes():
-    assert doc['Notes'][0].startswith('Instead')
-    assert doc['Notes'][-1].endswith('definite.')
-    assert_equal(len(doc['Notes']), 17)
-
-def test_references():
-    assert doc['References'][0].startswith('..')
-    assert doc['References'][-1].endswith('2001.')
-
-def test_examples():
-    assert doc['Examples'][0].startswith('>>>')
-    assert doc['Examples'][-1].endswith('True]')
-
-def test_index():
-    assert_equal(doc['index']['default'], 'random')
-    assert_equal(len(doc['index']), 2)
-    assert_equal(len(doc['index']['refguide']), 2)
-
-def non_blank_line_by_line_compare(a,b):
-    a = textwrap.dedent(a)
-    b = textwrap.dedent(b)
-    a = [l.rstrip() for l in a.split('\n') if l.strip()]
-    b = [l.rstrip() for l in b.split('\n') if l.strip()]
-    for n,line in enumerate(a):
-        if not line == b[n]:
-            raise AssertionError("Lines %s of a and b differ: "
-                                 "\n>>> %s\n<<< %s\n" %
-                                 (n,line,b[n]))
-def test_str():
-    non_blank_line_by_line_compare(str(doc),
-"""numpy.multivariate_normal(mean, cov, shape=None, spam=None)
-
-Draw values from a multivariate normal distribution with specified
-mean and covariance.
-
-The multivariate normal or Gaussian distribution is a generalisation
-of the one-dimensional normal distribution to higher dimensions.
-
-Parameters
-----------
-mean : (N,) ndarray
-    Mean of the N-dimensional distribution.
-
-    .. math::
-
-       (1+2+3)/3
-
-cov : (N, N) ndarray
-    Covariance matrix of the distribution.
-shape : tuple of ints
-    Given a shape of, for example, (m,n,k), m*n*k samples are
-    generated, and packed in an m-by-n-by-k arrangement.  Because
-    each sample is N-dimensional, the output shape is (m,n,k,N).
-
-Returns
--------
-out : ndarray
-    The drawn samples, arranged according to `shape`.  If the
-    shape given is (m,n,...), then the shape of `out` is is
-    (m,n,...,N).
-
-    In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
-    value drawn from the distribution.
-list of str
-    This is not a real return value.  It exists to test
-    anonymous return values.
-
-Other Parameters
-----------------
-spam : parrot
-    A parrot off its mortal coil.
-
-Raises
-------
-RuntimeError
-    Some error
-
-Warns
------
-RuntimeWarning
-    Some warning
-
-Warnings
---------
-Certain warnings apply.
-
-See Also
---------
-`some`_, `other`_, `funcs`_
-
-`otherfunc`_
-    relationship
-
-Notes
------
-Instead of specifying the full covariance matrix, popular
-approximations include:
-
-  - Spherical covariance (`cov` is a multiple of the identity matrix)
-  - Diagonal covariance (`cov` has non-negative elements only on the diagonal)
-
-This geometrical property can be seen in two dimensions by plotting
-generated data-points:
-
->>> mean = [0,0]
->>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
-
->>> x,y = multivariate_normal(mean,cov,5000).T
->>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
-
-Note that the covariance matrix must be symmetric and non-negative
-definite.
-
-References
-----------
-.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
-       Processes," 3rd ed., McGraw-Hill Companies, 1991
-.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
-       2nd ed., Wiley, 2001.
-
-Examples
---------
->>> mean = (1,2)
->>> cov = [[1,0],[1,0]]
->>> x = multivariate_normal(mean,cov,(3,3))
->>> print x.shape
-(3, 3, 2)
-
-The following is probably true, given that 0.6 is roughly twice the
-standard deviation:
-
->>> print list( (x[0,0,:] - mean) < 0.6 )
-[True, True]
-
-.. index:: random
-   :refguide: random;distributions, random;gauss""")
-
-
-def test_sphinx_str():
-    sphinx_doc = SphinxDocString(doc_txt)
-    non_blank_line_by_line_compare(str(sphinx_doc),
-"""
-.. index:: random
-   single: random;distributions, random;gauss
-
-Draw values from a multivariate normal distribution with specified
-mean and covariance.
-
-The multivariate normal or Gaussian distribution is a generalisation
-of the one-dimensional normal distribution to higher dimensions.
-
-:Parameters:
-
-    **mean** : (N,) ndarray
-
-        Mean of the N-dimensional distribution.
-
-        .. math::
-
-           (1+2+3)/3
-
-    **cov** : (N, N) ndarray
-
-        Covariance matrix of the distribution.
-
-    **shape** : tuple of ints
-
-        Given a shape of, for example, (m,n,k), m*n*k samples are
-        generated, and packed in an m-by-n-by-k arrangement.  Because
-        each sample is N-dimensional, the output shape is (m,n,k,N).
-
-:Returns:
-
-    **out** : ndarray
-
-        The drawn samples, arranged according to `shape`.  If the
-        shape given is (m,n,...), then the shape of `out` is is
-        (m,n,...,N).
-
-        In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
-        value drawn from the distribution.
-
-    list of str
-
-        This is not a real return value.  It exists to test
-        anonymous return values.
-
-:Other Parameters:
-
-    **spam** : parrot
-
-        A parrot off its mortal coil.
-
-:Raises:
-
-    **RuntimeError**
-
-        Some error
-
-:Warns:
-
-    **RuntimeWarning**
-
-        Some warning
-
-.. warning::
-
-    Certain warnings apply.
-
-.. seealso::
-
-    :obj:`some`, :obj:`other`, :obj:`funcs`
-
-    :obj:`otherfunc`
-        relationship
-
-.. rubric:: Notes
-
-Instead of specifying the full covariance matrix, popular
-approximations include:
-
-  - Spherical covariance (`cov` is a multiple of the identity matrix)
-  - Diagonal covariance (`cov` has non-negative elements only on the diagonal)
-
-This geometrical property can be seen in two dimensions by plotting
-generated data-points:
-
->>> mean = [0,0]
->>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
-
->>> x,y = multivariate_normal(mean,cov,5000).T
->>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
-
-Note that the covariance matrix must be symmetric and non-negative
-definite.
-
-.. rubric:: References
-
-.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
-       Processes," 3rd ed., McGraw-Hill Companies, 1991
-.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
-       2nd ed., Wiley, 2001.
-
-.. only:: latex
-
-   [1]_, [2]_
-
-.. rubric:: Examples
-
->>> mean = (1,2)
->>> cov = [[1,0],[1,0]]
->>> x = multivariate_normal(mean,cov,(3,3))
->>> print x.shape
-(3, 3, 2)
-
-The following is probably true, given that 0.6 is roughly twice the
-standard deviation:
-
->>> print list( (x[0,0,:] - mean) < 0.6 )
-[True, True]
-""")
-
-
-doc2 = NumpyDocString("""
-    Returns array of indices of the maximum values of along the given axis.
-
-    Parameters
-    ----------
-    a : {array_like}
-        Array to look in.
-    axis : {None, integer}
-        If None, the index is into the flattened array, otherwise along
-        the specified axis""")
-
-def test_parameters_without_extended_description():
-    assert_equal(len(doc2['Parameters']), 2)
-
-doc3 = NumpyDocString("""
-    my_signature(*params, **kwds)
-
-    Return this and that.
-    """)
-
-def test_escape_stars():
-    signature = str(doc3).split('\n')[0]
-    assert_equal(signature, 'my_signature(\*params, \*\*kwds)')
-
-doc4 = NumpyDocString(
-    """a.conj()
-
-    Return an array with all complex-valued elements conjugated.""")
-
-def test_empty_extended_summary():
-    assert_equal(doc4['Extended Summary'], [])
-
-doc5 = NumpyDocString(
-    """
-    a.something()
-
-    Raises
-    ------
-    LinAlgException
-        If array is singular.
-
-    Warns
-    -----
-    SomeWarning
-        If needed
-    """)
-
-def test_raises():
-    assert_equal(len(doc5['Raises']), 1)
-    name,_,desc = doc5['Raises'][0]
-    assert_equal(name,'LinAlgException')
-    assert_equal(desc,['If array is singular.'])
-
-def test_warns():
-    assert_equal(len(doc5['Warns']), 1)
-    name,_,desc = doc5['Warns'][0]
-    assert_equal(name,'SomeWarning')
-    assert_equal(desc,['If needed'])
-
-def test_see_also():
-    doc6 = NumpyDocString(
-    """
-    z(x,theta)
-
-    See Also
-    --------
-    func_a, func_b, func_c
-    func_d : some equivalent func
-    foo.func_e : some other func over
-             multiple lines
-    func_f, func_g, :meth:`func_h`, func_j,
-    func_k
-    :obj:`baz.obj_q`
-    :class:`class_j`: fubar
-        foobar
-    """)
-
-    assert len(doc6['See Also']) == 12
-    for func, desc, role in doc6['See Also']:
-        if func in ('func_a', 'func_b', 'func_c', 'func_f',
-                    'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
-            assert(not desc)
-        else:
-            assert(desc)
-
-        if func == 'func_h':
-            assert role == 'meth'
-        elif func == 'baz.obj_q':
-            assert role == 'obj'
-        elif func == 'class_j':
-            assert role == 'class'
-        else:
-            assert role is None
-
-        if func == 'func_d':
-            assert desc == ['some equivalent func']
-        elif func == 'foo.func_e':
-            assert desc == ['some other func over', 'multiple lines']
-        elif func == 'class_j':
-            assert desc == ['fubar', 'foobar']
-
-def test_see_also_print():
-    class Dummy(object):
-        """
-        See Also
-        --------
-        func_a, func_b
-        func_c : some relationship
-                 goes here
-        func_d
-        """
-        pass
-
-    obj = Dummy()
-    s = str(FunctionDoc(obj, role='func'))
-    assert(':func:`func_a`, :func:`func_b`' in s)
-    assert('    some relationship' in s)
-    assert(':func:`func_d`' in s)
-
-doc7 = NumpyDocString("""
-
-        Doc starts on second line.
-
-        """)
-
-def test_empty_first_line():
-    assert doc7['Summary'][0].startswith('Doc starts')
-
-
-def test_no_summary():
-    str(SphinxDocString("""
-    Parameters
-    ----------"""))
-
-
-def test_unicode():
-    doc = SphinxDocString("""
-    öäöäöäöäöåååå
-
-    öäöäöäööäååå
-
-    Parameters
-    ----------
-    ååå : äää
-        ööö
-
-    Returns
-    -------
-    ååå : ööö
-        äää
-
-    """)
-    assert isinstance(doc['Summary'][0], str)
-    assert doc['Summary'][0] == 'öäöäöäöäöåååå'
-
-def test_plot_examples():
-    cfg = dict(use_plots=True)
-
-    doc = SphinxDocString("""
-    Examples
-    --------
-    >>> import matplotlib.pyplot as plt
-    >>> plt.plot([1,2,3],[4,5,6])
-    >>> plt.show()
-    """, config=cfg)
-    assert 'plot::' in str(doc), str(doc)
-
-    doc = SphinxDocString("""
-    Examples
-    --------
-    .. plot::
-
-       import matplotlib.pyplot as plt
-       plt.plot([1,2,3],[4,5,6])
-       plt.show()
-    """, config=cfg)
-    assert str(doc).count('plot::') == 1, str(doc)
-
-def test_class_members():
-
-    class Dummy(object):
-        """
-        Dummy class.
-
-        """
-        def spam(self, a, b):
-            """Spam\n\nSpam spam."""
-            pass
-        def ham(self, c, d):
-            """Cheese\n\nNo cheese."""
-            pass
-        @property
-        def spammity(self):
-            """Spammity index"""
-            return 0.95
-
-        class Ignorable(object):
-            """local class, to be ignored"""
-            pass
-
-    for cls in (ClassDoc, SphinxClassDoc):
-        doc = cls(Dummy, config=dict(show_class_members=False))
-        assert 'Methods' not in str(doc), (cls, str(doc))
-        assert 'spam' not in str(doc), (cls, str(doc))
-        assert 'ham' not in str(doc), (cls, str(doc))
-        assert 'spammity' not in str(doc), (cls, str(doc))
-        assert 'Spammity index' not in str(doc), (cls, str(doc))
-
-        doc = cls(Dummy, config=dict(show_class_members=True))
-        assert 'Methods' in str(doc), (cls, str(doc))
-        assert 'spam' in str(doc), (cls, str(doc))
-        assert 'ham' in str(doc), (cls, str(doc))
-        assert 'spammity' in str(doc), (cls, str(doc))
-
-        if cls is SphinxClassDoc:
-            assert '.. autosummary::' in str(doc), str(doc)
-        else:
-            assert 'Spammity index' in str(doc), str(doc)
-
-def test_duplicate_signature():
-    # Duplicate function signatures occur e.g. in ufuncs, when the
-    # automatic mechanism adds one, and a more detailed comes from the
-    # docstring itself.
-
-    doc = NumpyDocString(
-    """
-    z(x1, x2)
-
-    z(a, theta)
-    """)
-
-    assert doc['Signature'].strip() == 'z(a, theta)'
-
-
-class_doc_txt = """
-    Foo
-
-    Parameters
-    ----------
-    f : callable ``f(t, y, *f_args)``
-        Aaa.
-    jac : callable ``jac(t, y, *jac_args)``
-        Bbb.
-
-    Attributes
-    ----------
-    t : float
-        Current time.
-    y : ndarray
-        Current variable values.
-
-    Methods
-    -------
-    a
-    b
-    c
-
-    Examples
-    --------
-    For usage examples, see `ode`.
-"""
-
-def test_class_members_doc():
-    doc = ClassDoc(None, class_doc_txt)
-    non_blank_line_by_line_compare(str(doc),
-    """
-    Foo
-
-    Parameters
-    ----------
-    f : callable ``f(t, y, *f_args)``
-        Aaa.
-    jac : callable ``jac(t, y, *jac_args)``
-        Bbb.
-
-    Examples
-    --------
-    For usage examples, see `ode`.
-
-    Attributes
-    ----------
-    t : float
-        Current time.
-    y : ndarray
-        Current variable values.
-
-    Methods
-    -------
-    a
-
-    b
-
-    c
-
-    .. index::
-
-    """)
-
-def test_class_members_doc_sphinx():
-    doc = SphinxClassDoc(None, class_doc_txt)
-    non_blank_line_by_line_compare(str(doc),
-    """
-    Foo
-
-    :Parameters:
-
-        **f** : callable ``f(t, y, *f_args)``
-
-            Aaa.
-
-        **jac** : callable ``jac(t, y, *jac_args)``
-
-            Bbb.
-
-    .. rubric:: Examples
-
-    For usage examples, see `ode`.
-
-    .. rubric:: Attributes
-
-    ===  ==========
-      t  (float) Current time.
-      y  (ndarray) Current variable values.
-    ===  ==========
-
-    .. rubric:: Methods
-
-    ===  ==========
-      a
-      b
-      c
-    ===  ==========
-
-    """)
-
-if __name__ == "__main__":
-    import nose
-    nose.run()
diff --git a/doc/sphinxext/numpydoc/numpydoc/tests/test_linkcode.py b/doc/sphinxext/numpydoc/numpydoc/tests/test_linkcode.py
deleted file mode 100644
index 340166a..0000000
--- a/doc/sphinxext/numpydoc/numpydoc/tests/test_linkcode.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-import numpydoc.linkcode
-
-# No tests at the moment...
diff --git a/doc/sphinxext/numpydoc/numpydoc/tests/test_phantom_import.py b/doc/sphinxext/numpydoc/numpydoc/tests/test_phantom_import.py
deleted file mode 100644
index 80fae08..0000000
--- a/doc/sphinxext/numpydoc/numpydoc/tests/test_phantom_import.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-import sys
-from nose import SkipTest
-
-def test_import():
-    if sys.version_info[0] >= 3:
-        raise SkipTest("phantom_import not ported to Py3")
-
-    import numpydoc.phantom_import
-
-# No tests at the moment...
diff --git a/doc/sphinxext/numpydoc/numpydoc/tests/test_plot_directive.py b/doc/sphinxext/numpydoc/numpydoc/tests/test_plot_directive.py
deleted file mode 100644
index 1ea1076..0000000
--- a/doc/sphinxext/numpydoc/numpydoc/tests/test_plot_directive.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-import sys
-from nose import SkipTest
-
-def test_import():
-    if sys.version_info[0] >= 3:
-        raise SkipTest("plot_directive not ported to Python 3 (use the one from Matplotlib instead)")
-    import numpydoc.plot_directive
-
-# No tests at the moment...
diff --git a/doc/sphinxext/numpydoc/numpydoc/tests/test_traitsdoc.py b/doc/sphinxext/numpydoc/numpydoc/tests/test_traitsdoc.py
deleted file mode 100644
index fe5078c..0000000
--- a/doc/sphinxext/numpydoc/numpydoc/tests/test_traitsdoc.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-import sys
-from nose import SkipTest
-
-def test_import():
-    if sys.version_info[0] >= 3:
-        raise SkipTest("traitsdoc not ported to Python3")
-    import numpydoc.traitsdoc
-
-# No tests at the moment...
diff --git a/doc/sphinxext/numpydoc/numpydoc/traitsdoc.py b/doc/sphinxext/numpydoc/numpydoc/traitsdoc.py
deleted file mode 100644
index 596c54e..0000000
--- a/doc/sphinxext/numpydoc/numpydoc/traitsdoc.py
+++ /dev/null
@@ -1,142 +0,0 @@
-"""
-=========
-traitsdoc
-=========
-
-Sphinx extension that handles docstrings in the Numpy standard format, [1]
-and support Traits [2].
-
-This extension can be used as a replacement for ``numpydoc`` when support
-for Traits is required.
-
-.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard
-.. [2] http://code.enthought.com/projects/traits/
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import inspect
-import os
-import pydoc
-import collections
-
-from . import docscrape
-from . import docscrape_sphinx
-from .docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString
-
-from . import numpydoc
-
-from . import comment_eater
-
-class SphinxTraitsDoc(SphinxClassDoc):
-    def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc):
-        if not inspect.isclass(cls):
-            raise ValueError("Initialise using a class. Got %r" % cls)
-        self._cls = cls
-
-        if modulename and not modulename.endswith('.'):
-            modulename += '.'
-        self._mod = modulename
-        self._name = cls.__name__
-        self._func_doc = func_doc
-
-        docstring = pydoc.getdoc(cls)
-        docstring = docstring.split('\n')
-
-        # De-indent paragraph
-        try:
-            indent = min(len(s) - len(s.lstrip()) for s in docstring
-                         if s.strip())
-        except ValueError:
-            indent = 0
-
-        for n,line in enumerate(docstring):
-            docstring[n] = docstring[n][indent:]
-
-        self._doc = docscrape.Reader(docstring)
-        self._parsed_data = {
-            'Signature': '',
-            'Summary': '',
-            'Description': [],
-            'Extended Summary': [],
-            'Parameters': [],
-            'Returns': [],
-            'Raises': [],
-            'Warns': [],
-            'Other Parameters': [],
-            'Traits': [],
-            'Methods': [],
-            'See Also': [],
-            'Notes': [],
-            'References': '',
-            'Example': '',
-            'Examples': '',
-            'index': {}
-            }
-
-        self._parse()
-
-    def _str_summary(self):
-        return self['Summary'] + ['']
-
-    def _str_extended_summary(self):
-        return self['Description'] + self['Extended Summary'] + ['']
-
-    def __str__(self, indent=0, func_role="func"):
-        out = []
-        out += self._str_signature()
-        out += self._str_index() + ['']
-        out += self._str_summary()
-        out += self._str_extended_summary()
-        for param_list in ('Parameters', 'Traits', 'Methods',
-                           'Returns','Raises'):
-            out += self._str_param_list(param_list)
-        out += self._str_see_also("obj")
-        out += self._str_section('Notes')
-        out += self._str_references()
-        out += self._str_section('Example')
-        out += self._str_section('Examples')
-        out = self._str_indent(out,indent)
-        return '\n'.join(out)
-
-def looks_like_issubclass(obj, classname):
-    """ Return True if the object has a class or superclass with the given class
-    name.
-
-    Ignores old-style classes.
-    """
-    t = obj
-    if t.__name__ == classname:
-        return True
-    for klass in t.__mro__:
-        if klass.__name__ == classname:
-            return True
-    return False
-
-def get_doc_object(obj, what=None, config=None):
-    if what is None:
-        if inspect.isclass(obj):
-            what = 'class'
-        elif inspect.ismodule(obj):
-            what = 'module'
-        elif isinstance(obj, collections.Callable):
-            what = 'function'
-        else:
-            what = 'object'
-    if what == 'class':
-        doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc, config=config)
-        if looks_like_issubclass(obj, 'HasTraits'):
-            for name, trait, comment in comment_eater.get_class_traits(obj):
-                # Exclude private traits.
-                if not name.startswith('_'):
-                    doc['Traits'].append((name, trait, comment.splitlines()))
-        return doc
-    elif what in ('function', 'method'):
-        return SphinxFunctionDoc(obj, '', config=config)
-    else:
-        return SphinxDocString(pydoc.getdoc(obj), config=config)
-
-def setup(app):
-    # init numpydoc
-    numpydoc.setup(app, get_doc_object)
-
diff --git a/ipynbs/presentations/2014.05.13-ElBrogrammer/README.md b/ipynbs/presentations/2014.05.13-jairideout/README.md
similarity index 100%
rename from ipynbs/presentations/2014.05.13-ElBrogrammer/README.md
rename to ipynbs/presentations/2014.05.13-jairideout/README.md
diff --git a/ipynbs/presentations/2014.05.13-ElBrogrammer/dm.txt b/ipynbs/presentations/2014.05.13-jairideout/dm.txt
similarity index 100%
rename from ipynbs/presentations/2014.05.13-ElBrogrammer/dm.txt
rename to ipynbs/presentations/2014.05.13-jairideout/dm.txt
diff --git a/ipynbs/presentations/2014.05.13-ElBrogrammer/map.txt b/ipynbs/presentations/2014.05.13-jairideout/map.txt
similarity index 100%
rename from ipynbs/presentations/2014.05.13-ElBrogrammer/map.txt
rename to ipynbs/presentations/2014.05.13-jairideout/map.txt
diff --git a/ipynbs/presentations/2014.05.13-ElBrogrammer/scikit-bio presentation.ipynb b/ipynbs/presentations/2014.05.13-jairideout/scikit-bio presentation.ipynb
similarity index 100%
rename from ipynbs/presentations/2014.05.13-ElBrogrammer/scikit-bio presentation.ipynb
rename to ipynbs/presentations/2014.05.13-jairideout/scikit-bio presentation.ipynb
diff --git a/ipynbs/presentations/2014.05.13-ElBrogrammer/smalldm.txt b/ipynbs/presentations/2014.05.13-jairideout/smalldm.txt
similarity index 100%
rename from ipynbs/presentations/2014.05.13-ElBrogrammer/smalldm.txt
rename to ipynbs/presentations/2014.05.13-jairideout/smalldm.txt
diff --git a/ipynbs/presentations/2014.05.13-ElBrogrammer/style.css b/ipynbs/presentations/2014.05.13-jairideout/style.css
similarity index 100%
rename from ipynbs/presentations/2014.05.13-ElBrogrammer/style.css
rename to ipynbs/presentations/2014.05.13-jairideout/style.css
diff --git a/ipynbs/presentations/2014.05.13-ElBrogrammer/talktools.py b/ipynbs/presentations/2014.05.13-jairideout/talktools.py
similarity index 100%
rename from ipynbs/presentations/2014.05.13-ElBrogrammer/talktools.py
rename to ipynbs/presentations/2014.05.13-jairideout/talktools.py
diff --git a/licenses/flask.txt b/licenses/flask.txt
new file mode 100644
index 0000000..a7da10e
--- /dev/null
+++ b/licenses/flask.txt
@@ -0,0 +1,33 @@
+Copyright (c) 2015 by Armin Ronacher and contributors.  See AUTHORS
+for more details.
+
+Some rights reserved.
+
+Redistribution and use in source and binary forms of the software as well
+as documentation, with or without modification, are permitted provided
+that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above
+  copyright notice, this list of conditions and the following
+  disclaimer in the documentation and/or other materials provided
+  with the distribution.
+
+* The names of the contributors may not be used to endorse or
+  promote products derived from this software without specific
+  prior written permission.
+
+THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
+NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
diff --git a/licenses/numpydoc.txt b/licenses/numpydoc.txt
deleted file mode 100644
index fe10d70..0000000
--- a/licenses/numpydoc.txt
+++ /dev/null
@@ -1 +0,0 @@
-numpydoc license is at scikit-bio/doc/sphinxext/numpydoc/LICENSE.txt
diff --git a/licenses/python.txt b/licenses/python.txt
new file mode 100644
index 0000000..832d272
--- /dev/null
+++ b/licenses/python.txt
@@ -0,0 +1,270 @@
+A. HISTORY OF THE SOFTWARE
+==========================
+
+Python was created in the early 1990s by Guido van Rossum at Stichting
+Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
+as a successor of a language called ABC.  Guido remains Python's
+principal author, although it includes many contributions from others.
+
+In 1995, Guido continued his work on Python at the Corporation for
+National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
+in Reston, Virginia where he released several versions of the
+software.
+
+In May 2000, Guido and the Python core development team moved to
+BeOpen.com to form the BeOpen PythonLabs team.  In October of the same
+year, the PythonLabs team moved to Digital Creations (now Zope
+Corporation, see http://www.zope.com).  In 2001, the Python Software
+Foundation (PSF, see http://www.python.org/psf/) was formed, a
+non-profit organization created specifically to own Python-related
+Intellectual Property.  Zope Corporation is a sponsoring member of
+the PSF.
+
+All Python releases are Open Source (see http://www.opensource.org for
+the Open Source Definition).  Historically, most, but not all, Python
+releases have also been GPL-compatible; the table below summarizes
+the various releases.
+
+    Release         Derived     Year        Owner       GPL-
+                    from                                compatible? (1)
+
+    0.9.0 thru 1.2              1991-1995   CWI         yes
+    1.3 thru 1.5.2  1.2         1995-1999   CNRI        yes
+    1.6             1.5.2       2000        CNRI        no
+    2.0             1.6         2000        BeOpen.com  no
+    1.6.1           1.6         2001        CNRI        yes (2)
+    2.1             2.0+1.6.1   2001        PSF         no
+    2.0.1           2.0+1.6.1   2001        PSF         yes
+    2.1.1           2.1+2.0.1   2001        PSF         yes
+    2.2             2.1.1       2001        PSF         yes
+    2.1.2           2.1.1       2002        PSF         yes
+    2.1.3           2.1.2       2002        PSF         yes
+    2.2.1           2.2         2002        PSF         yes
+    2.2.2           2.2.1       2002        PSF         yes
+    2.2.3           2.2.2       2003        PSF         yes
+    2.3             2.2.2       2002-2003   PSF         yes
+    2.3.1           2.3         2002-2003   PSF         yes
+    2.3.2           2.3.1       2002-2003   PSF         yes
+    2.3.3           2.3.2       2002-2003   PSF         yes
+    2.3.4           2.3.3       2004        PSF         yes
+    2.3.5           2.3.4       2005        PSF         yes
+    2.4             2.3         2004        PSF         yes
+    2.4.1           2.4         2005        PSF         yes
+    2.4.2           2.4.1       2005        PSF         yes
+    2.4.3           2.4.2       2006        PSF         yes
+    2.5             2.4         2006        PSF         yes
+    2.7             2.6         2010        PSF         yes
+
+Footnotes:
+
+(1) GPL-compatible doesn't mean that we're distributing Python under
+    the GPL.  All Python licenses, unlike the GPL, let you distribute
+    a modified version without making your changes open source.  The
+    GPL-compatible licenses make it possible to combine Python with
+    other software that is released under the GPL; the others don't.
+
+(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
+    because its license has a choice of law clause.  According to
+    CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
+    is "not incompatible" with the GPL.
+
+Thanks to the many outside volunteers who have worked under Guido's
+direction to make these releases possible.
+
+
+B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
+===============================================================
+
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+--------------------------------------------
+
+1. This LICENSE AGREEMENT is between the Python Software Foundation
+("PSF"), and the Individual or Organization ("Licensee") accessing and
+otherwise using this software ("Python") in source or binary form and
+its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, PSF
+hereby grants Licensee a nonexclusive, royalty-free, world-wide
+license to reproduce, analyze, test, perform and/or display publicly,
+prepare derivative works, distribute, and otherwise use Python
+alone or in any derivative version, provided, however, that PSF's
+License Agreement and PSF's notice of copyright, i.e., "Copyright (c)
+2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation; All Rights
+Reserved" are retained in Python alone or in any derivative version
+prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python.
+
+4. PSF is making Python available to Licensee on an "AS IS"
+basis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between PSF and
+Licensee.  This License Agreement does not grant permission to use PSF
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using Python, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
+-------------------------------------------
+
+BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
+
+1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
+office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
+Individual or Organization ("Licensee") accessing and otherwise using
+this software in source or binary form and its associated
+documentation ("the Software").
+
+2. Subject to the terms and conditions of this BeOpen Python License
+Agreement, BeOpen hereby grants Licensee a non-exclusive,
+royalty-free, world-wide license to reproduce, analyze, test, perform
+and/or display publicly, prepare derivative works, distribute, and
+otherwise use the Software alone or in any derivative version,
+provided, however, that the BeOpen Python License is retained in the
+Software, alone or in any derivative version prepared by Licensee.
+
+3. BeOpen is making the Software available to Licensee on an "AS IS"
+basis.  BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
+SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
+AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
+DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+5. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+6. This License Agreement shall be governed by and interpreted in all
+respects by the law of the State of California, excluding conflict of
+law provisions.  Nothing in this License Agreement shall be deemed to
+create any relationship of agency, partnership, or joint venture
+between BeOpen and Licensee.  This License Agreement does not grant
+permission to use BeOpen trademarks or trade names in a trademark
+sense to endorse or promote products or services of Licensee, or any
+third party.  As an exception, the "BeOpen Python" logos available at
+http://www.pythonlabs.com/logos.html may be used according to the
+permissions granted on that web page.
+
+7. By copying, installing or otherwise using the software, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
+---------------------------------------
+
+1. This LICENSE AGREEMENT is between the Corporation for National
+Research Initiatives, having an office at 1895 Preston White Drive,
+Reston, VA 20191 ("CNRI"), and the Individual or Organization
+("Licensee") accessing and otherwise using Python 1.6.1 software in
+source or binary form and its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, CNRI
+hereby grants Licensee a nonexclusive, royalty-free, world-wide
+license to reproduce, analyze, test, perform and/or display publicly,
+prepare derivative works, distribute, and otherwise use Python 1.6.1
+alone or in any derivative version, provided, however, that CNRI's
+License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
+1995-2001 Corporation for National Research Initiatives; All Rights
+Reserved" are retained in Python 1.6.1 alone or in any derivative
+version prepared by Licensee.  Alternately, in lieu of CNRI's License
+Agreement, Licensee may substitute the following text (omitting the
+quotes): "Python 1.6.1 is made available subject to the terms and
+conditions in CNRI's License Agreement.  This Agreement together with
+Python 1.6.1 may be located on the Internet using the following
+unique, persistent identifier (known as a handle): 1895.22/1013.  This
+Agreement may also be obtained from a proxy server on the Internet
+using the following URL: http://hdl.handle.net/1895.22/1013".
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python 1.6.1 or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python 1.6.1.
+
+4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
+basis.  CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. This License Agreement shall be governed by the federal
+intellectual property law of the United States, including without
+limitation the federal copyright law, and, to the extent such
+U.S. federal law does not apply, by the law of the Commonwealth of
+Virginia, excluding Virginia's conflict of law provisions.
+Notwithstanding the foregoing, with regard to derivative works based
+on Python 1.6.1 that incorporate non-separable material that was
+previously distributed under the GNU General Public License (GPL), the
+law of the Commonwealth of Virginia shall govern this License
+Agreement only as to issues arising under or with respect to
+Paragraphs 4, 5, and 7 of this License Agreement.  Nothing in this
+License Agreement shall be deemed to create any relationship of
+agency, partnership, or joint venture between CNRI and Licensee.  This
+License Agreement does not grant permission to use CNRI trademarks or
+trade name in a trademark sense to endorse or promote products or
+services of Licensee, or any third party.
+
+8. By clicking on the "ACCEPT" button where indicated, or by copying,
+installing or otherwise using Python 1.6.1, Licensee agrees to be
+bound by the terms and conditions of this License Agreement.
+
+        ACCEPT
+
+
+CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
+--------------------------------------------------
+
+Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
+The Netherlands.  All rights reserved.
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose and without fee is hereby granted,
+provided that the above copyright notice appear in all copies and that
+both that copyright notice and this permission notice appear in
+supporting documentation, and that the name of Stichting Mathematisch
+Centrum or CWI not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
+THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
+FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/setup.py b/setup.py
index bcccc34..02ae5ab 100644
--- a/setup.py
+++ b/setup.py
@@ -10,15 +10,34 @@
 
 import os
 import platform
+import re
+import ast
 from setuptools import find_packages, setup
 from setuptools.extension import Extension
+from setuptools.command.build_ext import build_ext as _build_ext
 
-import numpy as np
 
-__version__ = "0.2.3"
+# Bootstrap setup.py with numpy
+# Huge thanks to coldfix's solution
+# http://stackoverflow.com/a/21621689/579416
+class build_ext(_build_ext):
+    def finalize_options(self):
+        _build_ext.finalize_options(self)
+        # Prevent numpy from thinking it is still in its setup process:
+        __builtins__.__NUMPY_SETUP__ = False
+        import numpy
+        self.include_dirs.append(numpy.get_include())
+
+# version parsing from __init__ pulled from Flask's setup.py
+# https://github.com/mitsuhiko/flask/blob/master/setup.py
+_version_re = re.compile(r'__version__\s+=\s+(.*)')
+
+with open('skbio/__init__.py', 'rb') as f:
+    hit = _version_re.search(f.read().decode('utf-8')).group(1)
+    version = str(ast.literal_eval(hit))
 
 classes = """
-    Development Status :: 1 - Planning
+    Development Status :: 4 - Beta
     License :: OSI Approved :: BSD License
     Topic :: Software Development :: Libraries
     Topic :: Scientific/Engineering
@@ -72,7 +91,7 @@ if USE_CYTHON:
     extensions = cythonize(extensions)
 
 setup(name='scikit-bio',
-      version=__version__,
+      version=version,
       license='BSD',
       description=description,
       long_description=long_description,
@@ -84,19 +103,31 @@ setup(name='scikit-bio',
       test_suite='nose.collector',
       packages=find_packages(),
       ext_modules=extensions,
-      include_dirs=[np.get_include()],
-      install_requires=['numpy >= 1.7', 'matplotlib >= 1.1.0',
-                        'scipy >= 0.13.0', 'pandas', 'future', 'six',
-                        'natsort', 'IPython'],
-      extras_require={'test': ["nose >= 0.10.1", "pep8", "flake8",
+      cmdclass={'build_ext': build_ext},
+      setup_requires=['numpy >= 1.9.2'],
+      install_requires=[
+          'bz2file >= 0.98',
+          'CacheControl[FileCache] >= 0.11.5',
+          'contextlib2 >= 0.4.0',
+          'decorator >= 3.4.2',
+          'future >= 0.14.3',
+          'IPython >= 3.2.0',
+          'matplotlib >= 1.4.3',
+          'natsort >= 4.0.3',
+          'numpy >= 1.9.2',
+          'pandas >= 0.16.2',
+          'scipy >= 0.15.1',
+          'six >= 1.9.0'
+      ],
+      extras_require={'test': ["HTTPretty", "nose", "pep8", "flake8",
                                "python-dateutil"],
                       'doc': ["Sphinx == 1.2.2", "sphinx-bootstrap-theme"]},
       classifiers=classifiers,
       package_data={
           'skbio.io.tests': ['data/*'],
+          'skbio.io.format.tests': ['data/*'],
           'skbio.stats.tests': ['data/*'],
           'skbio.stats.distance.tests': ['data/*'],
-          'skbio.stats.ordination.tests': ['data/*'],
-          'skbio.parse.sequences.tests': ['data/*'],
+          'skbio.stats.ordination.tests': ['data/*']
           }
       )
diff --git a/skbio/__init__.py b/skbio/__init__.py
index dd0c1ff..35067ce 100644
--- a/skbio/__init__.py
+++ b/skbio/__init__.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -9,47 +8,38 @@
 
 from __future__ import absolute_import, division, print_function
 
-from numpy.testing import Tester
+from skbio.util import TestRunner
 
 # Add skbio.io to sys.modules to prevent cycles in our imports
-import skbio.io
+import skbio.io  # noqa
 # imports included for convenience
-from skbio.sequence import (
-    BiologicalSequence, NucleotideSequence, DNA, DNASequence, RNA, RNASequence,
-    Protein, ProteinSequence)
+from skbio.sequence import Sequence, DNA, RNA, Protein, GeneticCode
 from skbio.stats.distance import DistanceMatrix
 from skbio.alignment import (
     local_pairwise_align_ssw, SequenceCollection, Alignment)
-from skbio.tree import (
-    TreeNode, nj)
-from skbio.parse.sequences import (
-    parse_fasta, parse_fastq, parse_qual, FastaIterator, FastqIterator,
-    SequenceIterator)
+from skbio.tree import TreeNode, nj
 from skbio.io import read, write
 
-skbio.io  # Stop flake8 error
 
-__all__ = ['BiologicalSequence', 'NucleotideSequence', 'DNA', 'DNASequence',
-           'RNA', 'RNASequence', 'Protein', 'ProteinSequence',
-           'DistanceMatrix', 'local_pairwise_align_ssw',
-           'SequenceCollection', 'Alignment', 'TreeNode', 'nj', 'parse_fasta',
-           'parse_fastq', 'parse_qual', 'FastaIterator',
-           'FastqIterator', 'SequenceIterator', 'read',
-           'write']
-
-test = Tester().test
+__all__ = ['Sequence', 'DNA', 'RNA', 'Protein', 'GeneticCode',
+           'DistanceMatrix', 'local_pairwise_align_ssw', 'SequenceCollection',
+           'Alignment', 'TreeNode', 'nj', 'read', 'write']
 
 __credits__ = "https://github.com/biocore/scikit-bio/graphs/contributors"
-__version__ = "0.2.3"
+__version__ = "0.4.0"
 
 mottos = [
     # 03/15/2014
     "It's gonna get weird, bro.",
     # 05/14/2014
-    "no cog yay"
+    "no cog yay",
+    # 03/18/2015
+    "bincount!",
 ]
 motto = mottos[-1]
 
+# Created at patorjk.com
+
 title = r"""
 *                                                    *
                _ _    _ _          _     _
@@ -62,6 +52,8 @@ title = r"""
 *                                                    *
 """
 
+# Created by @gregcaporaso
+
 art = r"""
 
            Opisthokonta
@@ -87,6 +79,7 @@ if __doc__ is None:
 else:
     __doc__ = title + art + __doc__
 
+test = TestRunner(__file__).test
+
 if __name__ == '__main__':
-    print(title)
-    print(art)
+    test()
diff --git a/skbio/alignment/__init__.py b/skbio/alignment/__init__.py
index ef24fb5..48c2819 100644
--- a/skbio/alignment/__init__.py
+++ b/skbio/alignment/__init__.py
@@ -1,5 +1,5 @@
 r"""
-Sequence collections and alignments (:mod:`skbio.alignment`)
+Alignments and Sequence collections (:mod:`skbio.alignment`)
 ============================================================
 
 .. currentmodule:: skbio.alignment
@@ -19,7 +19,6 @@ Data Structures
 
    SequenceCollection
    Alignment
-   StockholmAlignment
 
 Optimized (i.e., production-ready) Alignment Algorithms
 -------------------------------------------------------
@@ -60,49 +59,30 @@ Exceptions
 
    SequenceCollectionError
    AlignmentError
-   StockholmParseError
 
 Data Structure Examples
 -----------------------
->>> from StringIO import StringIO
->>> from skbio.alignment import SequenceCollection, Alignment
->>> from skbio.sequence import DNA
->>> seqs = [DNA("ACC--G-GGTA..", id="seq1"),
-...     DNA("TCC--G-GGCA..", id="seqs2")]
+>>> from skbio import SequenceCollection, Alignment, DNA
+>>> seqs = [DNA("ACC--G-GGTA..", metadata={'id':"seq1"}),
+...         DNA("TCC--G-GGCA..", metadata={'id':"seqs2"})]
 >>> a1 = Alignment(seqs)
 >>> a1
 <Alignment: n=2; mean +/- std length=13.00 +/- 0.00>
 
->>> seqs = [DNA("ACCGGG", id="seq1"),
-...     DNA("TCCGGGCA", id="seq2")]
+>>> seqs = [DNA("ACCGGG", metadata={'id':"seq1"}),
+...         DNA("TCCGGGCA", metadata={'id':"seq2"})]
 >>> s1 = SequenceCollection(seqs)
 >>> s1
 <SequenceCollection: n=2; mean +/- std length=7.00 +/- 1.00>
 
->>> from skbio.parse.sequences import parse_fasta
->>> fasta_f = StringIO('>seq1\n'
-...                    'CGATGTCGATCGATCGATCGATCAG\n'
-...                    '>seq2\n'
-...                    'CATCGATCGATCGATGCATGCATGCATG\n')
->>> s1 = SequenceCollection.from_fasta_records(parse_fasta(fasta_f), DNA)
+>>> fasta_lines = [u'>seq1\n',
+...                u'CGATGTCGATCGATCGATCGATCAG\n',
+...                u'>seq2\n',
+...                u'CATCGATCGATCGATGCATGCATGCATG\n']
+>>> s1 = SequenceCollection.read(fasta_lines, constructor=DNA)
 >>> s1
 <SequenceCollection: n=2; mean +/- std length=26.50 +/- 1.50>
 
->>> from skbio.sequence import RNA
->>> from skbio.alignment import StockholmAlignment
->>> seqs = [RNA("ACC--G-GGGU", id="seq1"),
-...     RNA("TCC--G-GGGA", id="seq2")]
->>> gc = {'SS_cons': '(((.....)))'}
->>> sto = StockholmAlignment(seqs, gc=gc)
->>> print(sto)
-# STOCKHOLM 1.0
-seq1          ACC--G-GGGU
-seq2          TCC--G-GGGA
-#=GC SS_cons  (((.....)))
-//
->>> sto.gc
-{'SS_cons': '(((.....)))'}
-
 Alignment Algorithm Examples
 ----------------------------
 
@@ -115,7 +95,7 @@ Using the convenient ``local_pairwise_align_ssw`` function:
 ...                 "ACTAAGGCTCTCTACCCCTCTCAGAGA",
 ...                 "ACTAAGGCTCCTAACCCCCTTTTCTCAGA"
 ...             )
->>> print alignment
+>>> print(alignment)
 >query
 ACTAAGGCTCTC-TACCC----CTCTCAGA
 >target
@@ -127,7 +107,7 @@ Using the ``StripedSmithWaterman`` object:
 >>> from skbio.alignment import StripedSmithWaterman
 >>> query = StripedSmithWaterman("ACTAAGGCTCTCTACCCCTCTCAGAGA")
 >>> alignment = query("AAAAAACTCTCTAAACTCACTAAGGCTCTCTACCCCTCTTCAGAGAAGTCGA")
->>> print alignment
+>>> print(alignment)
 ACTAAGGCTC...
 ACTAAGGCTC...
 Score: 49
@@ -150,14 +130,14 @@ way and finding the aligned sequence representations:
 ...     alignment = query(target_sequence)
 ...     alignments.append(alignment)
 ...
->>> print alignments[0]
+>>> print(alignments[0])
 ACTAAGGCT-...
 ACT-AGGCTC...
 Score: 38
 Length: 30
->>> print alignments[0].aligned_query_sequence
+>>> print(alignments[0].aligned_query_sequence)
 ACTAAGGCT---CTCTACCCCTCTCAGAGA
->>> print alignments[0].aligned_target_sequence
+>>> print(alignments[0].aligned_target_sequence)
 ACT-AGGCTCCCTTCTACCCCTCTCAGAGA
 
 Slow Alignment Algorithm Examples
@@ -201,13 +181,13 @@ And we can view the score of the alignment using the ``score`` method:
 25.0
 
 Similarly, we can perform global alignment of nucleotide sequences, and print
-the resulting alignment as fasta records:
+the resulting alignment in FASTA format:
 
 >>> from skbio.alignment import global_pairwise_align_nucleotide
 >>> s1 = "GCGTGCCTAAGGTATGCAAG"
 >>> s2 = "ACGTGCCTAGGTACGCAAG"
 >>> r = global_pairwise_align_nucleotide(s1, s2)
->>> print(r.to_fasta())
+>>> print(r)
 >0
 GCGTGCCTAAGGTATGCAAG
 >1
@@ -225,26 +205,27 @@ ACGTGCCTA-GGTACGCAAG
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from numpy.testing import Tester
+from __future__ import absolute_import, division, print_function
+
+from skbio.util import TestRunner
 
-from ._alignment import Alignment, SequenceCollection, StockholmAlignment
+from ._alignment import Alignment, SequenceCollection
 from ._pairwise import (
     local_pairwise_align_nucleotide, local_pairwise_align_protein,
     local_pairwise_align, global_pairwise_align_nucleotide,
     global_pairwise_align_protein, global_pairwise_align,
-    make_identity_substitution_matrix
+    make_identity_substitution_matrix, local_pairwise_align_ssw
 )
 from skbio.alignment._ssw_wrapper import (
-    StripedSmithWaterman, local_pairwise_align_ssw, AlignmentStructure)
-from ._exception import (SequenceCollectionError, StockholmParseError,
-                         AlignmentError)
+    StripedSmithWaterman, AlignmentStructure)
+from ._exception import (SequenceCollectionError, AlignmentError)
 
-__all__ = ['Alignment', 'SequenceCollection', 'StockholmAlignment',
+__all__ = ['Alignment', 'SequenceCollection',
            'StripedSmithWaterman', 'AlignmentStructure',
            'local_pairwise_align_ssw', 'SequenceCollectionError',
-           'StockholmParseError', 'AlignmentError', 'global_pairwise_align',
+           'AlignmentError', 'global_pairwise_align',
            'global_pairwise_align_nucleotide', 'global_pairwise_align_protein',
            'local_pairwise_align', 'local_pairwise_align_nucleotide',
            'local_pairwise_align_protein', 'make_identity_substitution_matrix']
 
-test = Tester().test
+test = TestRunner(__file__).test
diff --git a/skbio/alignment/_alignment.py b/skbio/alignment/_alignment.py
index c2dcbc8..2a7440e 100644
--- a/skbio/alignment/_alignment.py
+++ b/skbio/alignment/_alignment.py
@@ -9,19 +9,18 @@
 from __future__ import absolute_import, division, print_function
 from future.builtins import zip, range
 from future.utils import viewkeys, viewitems
-from six import StringIO
 
-import warnings
-from collections import Counter, defaultdict, OrderedDict
+from collections import Counter, defaultdict
 
 import numpy as np
 from scipy.stats import entropy
+import six
 
 from skbio._base import SkbioObject
+from skbio.sequence import Sequence
 from skbio.stats.distance import DistanceMatrix
-from skbio.io.util import open_file
-from ._exception import (SequenceCollectionError, StockholmParseError,
-                         AlignmentError)
+from ._exception import (SequenceCollectionError, AlignmentError)
+from skbio.util._decorator import experimental
 
 
 class SequenceCollection(SkbioObject):
@@ -29,34 +28,32 @@ class SequenceCollection(SkbioObject):
 
     Parameters
     ----------
-    seqs : list of `skbio.sequence.BiologicalSequence` objects
-        The `skbio.sequence.BiologicalSequence` objects to load into
-        a new `SequenceCollection` object.
+    seqs : list of `skbio.Sequence` objects
+        The `skbio.Sequence` objects to load into a new `SequenceCollection`
+        object.
     validate : bool, optional
         If True, runs the `is_valid` method after construction and raises
         `SequenceCollectionError` if ``is_valid == False``.
 
     Raises
     ------
-    skbio.alignment.SequenceCollectionError
+    skbio.SequenceCollectionError
         If ``validate == True`` and ``is_valid == False``.
 
     See Also
     --------
-    skbio.sequence.BiologicalSequence
-    skbio.sequence.NucleotideSequence
-    skbio.sequence.DNASequence
-    skbio.sequence.RNASequence
+    skbio
+    skbio.DNA
+    skbio.RNA
+    skbio.Protein
     Alignment
-    skbio.parse.sequences
-    skbio.parse.sequences.parse_fasta
 
     Examples
     --------
-    >>> from skbio.alignment import SequenceCollection
-    >>> from skbio.sequence import DNA
-    >>> sequences = [DNA('ACCGT', id="seq1"),
-    ...              DNA('AACCGGT', id="seq2")]
+    >>> from skbio import SequenceCollection
+    >>> from skbio import DNA
+    >>> sequences = [DNA('ACCGT', metadata={'id': "seq1"}),
+    ...              DNA('AACCGGT', metadata={'id': "seq2"})]
     >>> s1 = SequenceCollection(sequences)
     >>> s1
     <SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
@@ -64,123 +61,48 @@ class SequenceCollection(SkbioObject):
     """
     default_write_format = 'fasta'
 
-    @classmethod
-    def from_fasta_records(cls, fasta_records, seq_constructor,
-                           validate=False):
-        r"""Initialize a `SequenceCollection` object
-
-        .. note:: Deprecated in scikit-bio 0.2.0-dev
-           ``from_fasta_records`` will be removed in scikit-bio 0.3.0. It is
-           replaced by ``read``, which is a more general method for
-           deserializing FASTA-formatted files. ``read`` supports multiple file
-           formats, automatic file format detection, etc. by taking advantage
-           of scikit-bio's I/O registry system. See :mod:`skbio.io` for more
-           details.
-
-        Parameters
-        ----------
-        fasta_records : iterator of tuples
-            The records to load into a new `SequenceCollection` object. These
-            should be tuples of ``(sequence_id, sequence)``.
-        seq_constructor : skbio.sequence.BiologicalSequence
-        validate : bool, optional
-            If True, runs the `is_valid` method after construction and raises
-            `SequenceCollectionError` if ``is_valid == False``.
-
-        Returns
-        -------
-        SequenceCollection (or a derived class)
-            The new `SequenceCollection` object.
-
-        Raises
-        ------
-        skbio.alignment.SequenceCollectionError
-            If ``validate == True`` and ``is_valid == False``.
-
-        See Also
-        --------
-        skbio.sequence.BiologicalSequence
-        skbio.sequence.NucleotideSequence
-        skbio.sequence.DNASequence
-        skbio.sequence.RNASequence
-        Alignment
-        skbio.parse.sequences
-        skbio.parse.sequences.parse_fasta
-
-        Examples
-        --------
-        >>> from skbio.alignment import SequenceCollection
-        >>> from skbio.parse.sequences import parse_fasta
-        >>> from StringIO import StringIO
-        >>> from skbio.sequence import DNA
-        >>> fasta_f = StringIO('>seq1\nACCGT\n>seq2\nAACCGGT\n')
-        >>> s1 = SequenceCollection.from_fasta_records(
-        ...     parse_fasta(fasta_f), DNA)
-        >>> s1
-        <SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
-
-        >>> records = [('seq1', 'ACCGT'), ('seq2', 'AACCGGT')]
-        >>> s1 = SequenceCollection.from_fasta_records(records, DNA)
-        >>> s1
-        <SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
-
-        """
-        warnings.warn(
-            "SequenceCollection.from_fasta_records is deprecated and will be "
-            "removed in scikit-bio 0.3.0. Please update your code to use "
-            "SequenceCollection.read.", DeprecationWarning)
-
-        data = []
-        for seq_id, seq in fasta_records:
-            try:
-                id, description = seq_id.split(None, 1)
-            except ValueError:
-                id = seq_id.strip()
-                description = None
-            data.append(seq_constructor(seq, id=id,
-                                        description=description))
-
-        return cls(data, validate=validate)
-
-    def __init__(self, seqs, validate=False):
+    @experimental(as_of="0.4.0")
+    def __init__(self, seqs):
+        # TODO: find a good way to support generic Sequence objects in
+        # SequenceCollection and Alignment. The issue is that some methods
+        # assume that a sequence has knowledge of gap characters and a
+        # standard alphabet, which aren't present on Sequence. For now, if
+        # these methods are called by a user they'll get an error (likely
+        # an AttributeError).
         self._data = seqs
         self._id_to_index = {}
         for i, seq in enumerate(self._data):
-            id = seq.id
-            if id in self:
+            if 'id' not in seq.metadata:
+                raise SequenceCollectionError(
+                    "'id' must be included in the sequence metadata")
+            id_ = seq.metadata['id']
+
+            if id_ in self:
                 raise SequenceCollectionError(
                     "All sequence ids must be unique, but "
-                    "id '%s' is present multiple times." % id)
+                    "id '%s' is present multiple times." % id_)
             else:
-                self._id_to_index[seq.id] = i
-
-        # This is bad because we're making a second pass through the sequence
-        # collection to validate. We'll want to avoid this, but it's tricky
-        # because different subclasses will want to define their own is_valid
-        # methods.
-        if validate and not self.is_valid():
-            raise SequenceCollectionError(
-                "%s failed to validate." % self.__class__.__name__)
+                self._id_to_index[id_] = i
 
+    @experimental(as_of="0.4.0")
     def __contains__(self, id):
         r"""The in operator.
 
         Parameters
         ----------
         id : str
-            The id to look up in the `SequenceCollection`.
+            The `skbio.Sequence.id` to look up in the `SequenceCollection`.
 
         Returns
         -------
         bool
-            Indicates whether `id` corresponds to a sequence id
-            in the `SequenceCollection`.
-
-        .. shownumpydoc
+            Returns `True` if `id` is the `skbio.Sequence.id` of a sequence in
+            the `SequenceCollection`.
 
         """
         return id in self._id_to_index
 
+    @experimental(as_of="0.4.0")
     def __eq__(self, other):
         r"""The equality operator.
 
@@ -198,9 +120,7 @@ class SequenceCollection(SkbioObject):
         -----
         `SequenceCollection` objects are equal if they are the same type,
         contain the same number of sequences, and if each of the
-        `skbio.sequence.BiologicalSequence` objects, in order, are equal.
-
-        .. shownumpydoc
+        `skbio.Sequence` objects, in order, are equal.
 
         """
         if self.__class__ != other.__class__:
@@ -213,56 +133,74 @@ class SequenceCollection(SkbioObject):
                     return False
         return True
 
+    @experimental(as_of="0.4.0")
     def __getitem__(self, index):
         r"""The indexing operator.
 
         Parameters
         ----------
         index : int, str
-            The position or sequence id of the
-            `skbio.sequence.BiologicalSequence` to return from the
-            `SequenceCollection`.
+            The position or sequence id of the `skbio.Sequence` to return from
+            the `SequenceCollection`.
 
         Returns
         -------
-        `skbio.sequence.BiologicalSequence`
-            The `skbio.sequence.BiologicalSequence` at the specified
-            index in the `SequenceCollection`.
+        skbio.Sequence
+            The `skbio.Sequence` at the specified index in the
+            `SequenceCollection`.
 
         Examples
         --------
-        >>> from skbio.alignment import SequenceCollection
-        >>> from skbio.sequence import DNA
-        >>> sequences = [DNA('ACCGT', id="seq1"),
-        ...              DNA('AACCGGT', id="seq2")]
-        >>> s1 = SequenceCollection(sequences)
-        >>> s1[0]
-        <DNASequence: ACCGT (length: 5)>
-        >>> s1["seq1"]
-        <DNASequence: ACCGT (length: 5)>
-
-        .. shownumpydoc
+        >>> from skbio import DNA, SequenceCollection
+        >>> sequences = [DNA('ACCGT', metadata={'id': "seq1"}),
+        ...              DNA('AACCGGT', metadata={'id': "seq2"})]
+        >>> sc = SequenceCollection(sequences)
+        >>> sc[0]
+        DNA
+        -----------------------------
+        Metadata:
+            'id': 'seq1'
+        Stats:
+            length: 5
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 60.00%
+        -----------------------------
+        0 ACCGT
+        >>> sc["seq1"]
+        DNA
+        -----------------------------
+        Metadata:
+            'id': 'seq1'
+        Stats:
+            length: 5
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 60.00%
+        -----------------------------
+        0 ACCGT
 
         """
-        if isinstance(index, str):
+        if isinstance(index, six.string_types):
             return self.get_seq(index)
         else:
             return self._data[index]
 
+    @experimental(as_of="0.4.0")
     def __iter__(self):
         r"""The iter operator.
 
         Returns
         -------
         iterator
-            `skbio.sequence.BiologicalSequence` iterator for the
-            `SequenceCollection`.
-
-        .. shownumpydoc
+            `skbio.Sequence` iterator for the `SequenceCollection`.
 
         """
         return iter(self._data)
 
+    @experimental(as_of="0.4.0")
     def __len__(self):
         r"""The len operator.
 
@@ -271,11 +209,10 @@ class SequenceCollection(SkbioObject):
         int
             The number of sequences in the `SequenceCollection`.
 
-        .. shownumpydoc
-
         """
         return self.sequence_count()
 
+    @experimental(as_of="0.4.0")
     def __ne__(self, other):
         r"""The inequality operator.
 
@@ -293,11 +230,10 @@ class SequenceCollection(SkbioObject):
         See `SequenceCollection.__eq__` for a description of what it means for
         a pair of `SequenceCollection` objects to be equal.
 
-        .. shownumpydoc
-
         """
         return not self.__eq__(other)
 
+    @experimental(as_of="0.4.0")
     def __repr__(self):
         r"""The repr method.
 
@@ -314,36 +250,34 @@ class SequenceCollection(SkbioObject):
 
         Examples
         --------
-        >>> from skbio.alignment import SequenceCollection
-        >>> from skbio.sequence import DNA
-        >>> sequences = [DNA('ACCGT', id="seq1"),
-        ...              DNA('AACCGGT', id="seq2")]
+        >>> from skbio import SequenceCollection
+        >>> from skbio import DNA
+        >>> sequences = [DNA('ACCGT', metadata={'id': "seq1"}),
+        ...              DNA('AACCGGT', metadata={'id': "seq2"})]
         >>> s1 = SequenceCollection(sequences)
         >>> print(repr(s1))
         <SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
 
-        .. shownumpydoc
-
         """
         cn = self.__class__.__name__
         count, center, spread = self.distribution_stats()
         return "<%s: n=%d; mean +/- std length=%.2f +/- %.2f>" \
             % (cn, count, center, spread)
 
+    @experimental(as_of="0.4.0")
     def __reversed__(self):
         """The reversed method.
 
         Returns
         -------
         iterator
-            `skbio.sequence.BiologicalSequence` iterator for the
-            `SequenceCollection` in reverse order.
-
-        .. shownumpydoc
+            `skbio.Sequence` iterator for the `SequenceCollection` in reverse
+            order.
 
         """
         return reversed(self._data)
 
+    @experimental(as_of="0.4.0")
     def __str__(self):
         r"""The str method.
 
@@ -352,15 +286,10 @@ class SequenceCollection(SkbioObject):
         str
             Fasta-formatted string of all sequences in the object.
 
-        .. shownumpydoc
-
         """
-        fh = StringIO()
-        self.write(fh, format='fasta')
-        fasta_str = fh.getvalue()
-        fh.close()
-        return fasta_str
+        return str(''.join(self.write([], format='fasta')))
 
+    @experimental(as_of="0.4.0")
     def distances(self, distance_fn):
         """Compute distances between all pairs of sequences
 
@@ -368,65 +297,36 @@ class SequenceCollection(SkbioObject):
         ----------
         distance_fn : function
             Function for computing the distance between a pair of sequences.
-            This must take two sequences as input (as
-            `skbio.sequence.BiologicalSequence` objects) and return a
-            single integer or float value.
+            This must take two sequences as input (as `skbio.Sequence` objects)
+            and return a single integer or float value.
 
         Returns
         -------
         skbio.DistanceMatrix
             Matrix containing the distances between all pairs of sequences.
 
-        Raises
-        ------
-        skbio.util.exception.BiologicalSequenceError
-            If ``len(self) != len(other)`` and ``distance_fn`` ==
-            ``scipy.spatial.distance.hamming``.
-
-        See Also
-        --------
-        skbio.DistanceMatrix
-        scipy.spatial.distance.hamming
-
-        Examples
-        --------
-        >>> from scipy.spatial.distance import hamming
-        >>> from skbio.alignment import SequenceCollection
-        >>> from skbio.sequence import DNA
-        >>> seqs = [DNA("ACCGGGTT", id="s1"),
-        ...         DNA("ACTTGGTT", id="s2"),
-        ...         DNA("ACTAGGTT", id="s3")]
-        >>> a1 = SequenceCollection(seqs)
-        >>> print(a1.distances(hamming))
-        3x3 distance matrix
-        IDs:
-        's1', 's2', 's3'
-        Data:
-        [[ 0.     0.25   0.25 ]
-         [ 0.25   0.     0.125]
-         [ 0.25   0.125  0.   ]]
-
         """
         sequence_count = self.sequence_count()
         dm = np.zeros((sequence_count, sequence_count))
         ids = []
         for i in range(sequence_count):
             self_i = self[i]
-            ids.append(self_i.id)
+            ids.append(self_i.metadata['id'])
             for j in range(i):
                 dm[i, j] = dm[j, i] = self_i.distance(self[j], distance_fn)
         return DistanceMatrix(dm, ids)
 
+    @experimental(as_of="0.4.0")
     def distribution_stats(self, center_f=np.mean, spread_f=np.std):
         r"""Return sequence count, and center and spread of sequence lengths
 
         Parameters
         ----------
         center_f : function
-            Should take a list-like object and return a single value
+            Should take an array_like object and return a single value
             representing the center of the distribution.
         spread_f : function
-            Should take a list-like object and return a single value
+            Should take an array_like object and return a single value
             representing the spread of the distribution.
 
         Returns
@@ -442,10 +342,10 @@ class SequenceCollection(SkbioObject):
 
         Examples
         --------
-        >>> from skbio.alignment import SequenceCollection
-        >>> from skbio.sequence import DNA
-        >>> sequences = [DNA('ACCGT', id="seq1"),
-        ...              DNA('AACCGGT', id="seq2")]
+        >>> from skbio import SequenceCollection
+        >>> from skbio import DNA
+        >>> sequences = [DNA('ACCGT', metadata={'id': "seq1"}),
+        ...              DNA('AACCGGT', metadata={'id': "seq2"})]
         >>> s1 = SequenceCollection(sequences)
         >>> s1.distribution_stats()
         (2, 6.0, 1.0)
@@ -459,22 +359,22 @@ class SequenceCollection(SkbioObject):
             return (sequence_count, center_f(sequence_lengths),
                     spread_f(sequence_lengths))
 
+    @experimental(as_of="0.4.0")
     def degap(self):
         r"""Return a new `SequenceCollection` with all gap characters removed.
 
         Returns
         -------
         SequenceCollection
-            A new `SequenceCollection` where
-            `skbio.sequence.BiologicalSequence.degap` has been called on
-            each sequence.
+            A new `SequenceCollection` where `skbio.Sequence.degap` has been
+            called on each sequence.
 
         Examples
         --------
-        >>> from skbio.alignment import SequenceCollection
-        >>> from skbio.sequence import DNA
-        >>> sequences = [DNA('A--CCGT.', id="seq1"),
-        ...              DNA('.AACCG-GT.', id="seq2")]
+        >>> from skbio import SequenceCollection
+        >>> from skbio import DNA
+        >>> sequences = [DNA('A--CCGT.', metadata={'id': "seq1"}),
+        ...              DNA('.AACCG-GT.', metadata={'id': "seq2"})]
         >>> s1 = SequenceCollection(sequences)
         >>> s2 = s1.degap()
         >>> s2
@@ -483,6 +383,7 @@ class SequenceCollection(SkbioObject):
         """
         return SequenceCollection([seq.degap() for seq in self])
 
+    @experimental(as_of="0.4.0")
     def get_seq(self, id):
         r"""Return a sequence from the `SequenceCollection` by its id.
 
@@ -493,8 +394,8 @@ class SequenceCollection(SkbioObject):
 
         Returns
         -------
-        skbio.sequence.BiologicalSequence
-            The `skbio.sequence.BiologicalSequence` with `id`.
+        skbio.Sequence
+            The `skbio.Sequence` with `id`.
 
         Raises
         ------
@@ -503,10 +404,10 @@ class SequenceCollection(SkbioObject):
 
         Examples
         --------
-        >>> from skbio.alignment import SequenceCollection
-        >>> from skbio.sequence import DNA
-        >>> sequences = [DNA('A--CCGT.', id="seq1"),
-        ...              DNA('.AACCG-GT.', id="seq2")]
+        >>> from skbio import SequenceCollection
+        >>> from skbio import DNA
+        >>> sequences = [DNA('A--CCGT.', metadata={'id': "seq1"}),
+        ...              DNA('.AACCG-GT.', metadata={'id': "seq2"})]
         >>> s1 = SequenceCollection(sequences)
         >>> print(s1['seq1'])
         A--CCGT.
@@ -514,49 +415,52 @@ class SequenceCollection(SkbioObject):
         """
         return self[self._id_to_index[id]]
 
+    @experimental(as_of="0.4.0")
     def ids(self):
-        """Returns the `BiologicalSequence` ids
+        """Returns the `Sequence` ids
 
         Returns
         -------
         list
-            The ordered list of ids for the
-            `skbio.sequence.BiologicalSequence` objects in the
+            The ordered list of ids for the `skbio.Sequence` objects in the
             `SequenceCollection`.
 
         Examples
         --------
-        >>> from skbio.alignment import SequenceCollection
-        >>> from skbio.sequence import DNA
-        >>> sequences = [DNA('A--CCGT.', id="seq1"),
-        ...              DNA('.AACCG-GT.', id="seq2")]
+        >>> from skbio import SequenceCollection
+        >>> from skbio import DNA
+        >>> sequences = [DNA('A--CCGT.', metadata={'id': "seq1"}),
+        ...              DNA('.AACCG-GT.', metadata={'id': "seq2"})]
         >>> s1 = SequenceCollection(sequences)
         >>> print(s1.ids())
         ['seq1', 'seq2']
 
         """
-        return [seq.id for seq in self]
+        return [seq.metadata['id'] for seq in self]
 
-    def update_ids(self, ids=None, fn=None, prefix=""):
+    @experimental(as_of="0.4.0")
+    def update_ids(self, ids=None, func=None, prefix=""):
         """Update sequence IDs on the sequence collection.
 
         IDs can be updated by providing a sequence of new IDs (`ids`) or a
-        function that maps current IDs to new IDs (`fn`).
+        function that maps current IDs to new IDs (`func`).
 
-        Default behavior (if `ids` and `fn` are not provided) is to create new
-        IDs that are unique postive integers (starting at 1) cast as strings,
-        optionally preceded by `prefix`. For example, ``('1', '2', '3', ...)``.
+        Default behavior (if `ids` and `func` are not provided) is to create
+        new IDs that are unique positive integers (starting at 1) cast as
+        strings, optionally preceded by `prefix`. For example, ``('1', '2',
+        '3', ...)``.
 
         Parameters
         ----------
         ids : sequence of str, optional
             New IDs to update on the sequence collection.
-        fn : function, optional
+        func : function, optional
             Function accepting a sequence of current IDs and returning a
             sequence of new IDs to update on the sequence collection.
         prefix : str, optional
-            If `ids` and `fn` are both ``None``, `prefix` is prepended to each
-            new integer-based ID (see description of default behavior above).
+            If `ids` and `func` are both ``None``, `prefix` is prepended to
+            each new integer-based ID (see description of default behavior
+            above).
 
         Returns
         -------
@@ -569,8 +473,8 @@ class SequenceCollection(SkbioObject):
         Raises
         ------
         SequenceCollectionError
-            If both `ids` and `fn` are provided, `prefix` is provided with
-            either `ids` or `fn`, or the number of new IDs does not match the
+            If both `ids` and `func` are provided, `prefix` is provided with
+            either `ids` or `func`, or the number of new IDs does not match the
             number of sequences in the sequence collection.
 
         Notes
@@ -590,8 +494,8 @@ class SequenceCollection(SkbioObject):
         and "def":
 
         >>> from skbio import DNA, SequenceCollection
-        >>> sequences = [DNA('A--CCGT.', id="abc"),
-        ...              DNA('.AACCG-GT.', id="def")]
+        >>> sequences = [DNA('A--CCGT.', metadata={'id': "abc"}),
+        ...              DNA('.AACCG-GT.', metadata={'id': "def"})]
         >>> s1 = SequenceCollection(sequences)
         >>> s1.ids()
         ['abc', 'def']
@@ -608,7 +512,7 @@ class SequenceCollection(SkbioObject):
 
         >>> def id_mapper(ids):
         ...     return [id_ + '-new' for id_ in ids]
-        >>> s3, new_to_old_ids = s1.update_ids(fn=id_mapper)
+        >>> s3, new_to_old_ids = s1.update_ids(func=id_mapper)
         >>> s3.ids()
         ['abc-new', 'def-new']
 
@@ -619,26 +523,26 @@ class SequenceCollection(SkbioObject):
         ['ghi', 'jkl']
 
         """
-        if ids is not None and fn is not None:
-            raise SequenceCollectionError("ids and fn cannot both be "
+        if ids is not None and func is not None:
+            raise SequenceCollectionError("ids and func cannot both be "
                                           "provided.")
-        if (ids is not None and prefix) or (fn is not None and prefix):
+        if (ids is not None and prefix) or (func is not None and prefix):
             raise SequenceCollectionError("prefix cannot be provided if ids "
-                                          "or fn is provided.")
+                                          "or func is provided.")
 
         if ids is not None:
-            def fn(_):
+            def func(_):
                 return ids
 
-        elif fn is None:
-            def fn(_):
+        elif func is None:
+            def func(_):
                 new_ids = []
                 for i in range(1, len(self) + 1):
                     new_ids.append("%s%d" % (prefix, i))
                 return new_ids
 
         old_ids = self.ids()
-        new_ids = fn(old_ids)
+        new_ids = func(old_ids)
 
         if len(new_ids) != len(old_ids):
             raise SequenceCollectionError(
@@ -649,60 +553,13 @@ class SequenceCollection(SkbioObject):
 
         new_seqs = []
         for new_id, seq in zip(new_ids, self):
-            new_seqs.append(seq.copy(id=new_id))
+            new_seq = seq.copy()
+            new_seq.metadata['id'] = new_id
+            new_seqs.append(new_seq)
 
         return self.__class__(new_seqs), new_to_old_ids
 
-    def int_map(self, prefix=""):
-        """Create an integer-based mapping of sequence ids
-
-        .. note:: Deprecated in scikit-bio 0.2.0-dev
-           ``SequenceCollection.int_map`` will be removed in scikit-bio 0.3.0
-           in favor of ``SequenceCollection.update_ids``, which provides a
-           generalized way of updating IDs on a ``SequenceCollection``. The
-           default behavior of ``SequenceCollection.update_ids`` matches the
-           behavior in ``int_map``, except that a new ``SequenceCollection`` is
-           returned instead of a ``dict``.
-
-        Parameters
-        ----------
-        prefix : str
-            String prefix for new integer-based ids.
-
-        Returns
-        -------
-        dict
-            Mapping of new ids to sequences.
-        dict
-            Mapping of new ids to old ids.
-
-        Notes
-        -----
-        This is useful when writing sequences out for use with programs that
-        are picky about their sequence ids (e.g., raXML).
-
-        The integer-based ids will be strings, for consistency (e.g., if prefix
-        is passed) and begin at 1.
-
-        References
-        ----------
-        RAxML Version 8: A tool for Phylogenetic Analysis and Post-Analysis of
-        Large Phylogenies". In Bioinformatics, 2014
-
-        """
-        warnings.warn(
-            "SequenceCollection.int_map is deprecated and will be removed in "
-            "scikit-bio 0.3.0. Please update your code to use "
-            "SequenceCollection.update_ids instead.", DeprecationWarning)
-
-        int_keys = []
-        int_map = []
-        for i, seq in enumerate(self):
-            k = ("%s%d" % (prefix, i+1))
-            int_map.append((k, seq))
-            int_keys.append((k, seq.id))
-        return dict(int_map), dict(int_keys)
-
+    @experimental(as_of="0.4.0")
     def is_empty(self):
         """Return True if the SequenceCollection is empty
 
@@ -715,73 +572,21 @@ class SequenceCollection(SkbioObject):
         """
         return self.sequence_count() == 0
 
-    def is_valid(self):
-        """Return True if the SequenceCollection is valid
-
-        Returns
-        -------
-        bool
-            ``True`` if `self` is valid, and ``False`` otherwise.
-
-        Notes
-        -----
-        Validity is defined as having no sequences containing characters
-        outside of their valid character sets.
-
-        See Also
-        --------
-        skbio.alignment.BiologicalSequence.is_valid
-
-        Examples
-        --------
-        >>> from skbio.alignment import SequenceCollection
-        >>> from skbio.sequence import DNA, RNA
-        >>> sequences = [DNA('ACCGT', id="seq1"),
-        ...              DNA('AACCGGT', id="seq2")]
-        >>> s1 = SequenceCollection(sequences)
-        >>> print(s1.is_valid())
-        True
-        >>> sequences = [RNA('ACCGT', id="seq1"),
-        ...              RNA('AACCGGT', id="seq2")]
-        >>> s1 = SequenceCollection(sequences)
-        >>> print(s1.is_valid())
-        False
-
-        """
-        return self._validate_character_set()
-
+    @experimental(as_of="0.4.0")
     def iteritems(self):
         """Generator of id, sequence tuples
 
         Returns
         -------
         generator of tuples
-            Each tuple contains ordered
-            (`skbio.sequence.BiologicalSequence.id`,
-            `skbio.sequence.BiologicalSequence`) pairs.
+            Each tuple contains ordered (``metadata['id']``, `skbio.Sequence`)
+            pairs.
 
         """
         for seq in self:
-            yield seq.id, seq
-
-    def lower(self):
-        """Converts all sequences to lowercase
-
-        Returns
-        -------
-        SequenceCollection
-            New `SequenceCollection` object where
-            `skbio.sequence.BiologicalSequence.lower()` has been called
-            on each sequence.
-
-        See Also
-        --------
-        skbio.sequence.BiologicalSequence.lower
-        upper
-
-        """
-        return self.__class__([seq.lower() for seq in self])
+            yield seq.metadata['id'], seq
 
+    @experimental(as_of="0.4.0")
     def sequence_count(self):
         """Return the count of sequences in the `SequenceCollection`
 
@@ -795,10 +600,21 @@ class SequenceCollection(SkbioObject):
         sequence_lengths
         Alignment.sequence_length
 
+        Examples
+        --------
+        >>> from skbio import SequenceCollection
+        >>> from skbio import DNA
+        >>> sequences = [DNA('A--CCGT.', metadata={'id': "seq1"}),
+        ...              DNA('.AACCG-GT.', metadata={'id': "seq2"})]
+        >>> s1 = SequenceCollection(sequences)
+        >>> print(s1.sequence_count())
+        2
+
         """
         return len(self._data)
 
-    def k_word_frequencies(self, k, overlapping=True):
+    @experimental(as_of="0.4.0")
+    def kmer_frequencies(self, k, overlap=True, relative=False):
         """Return k-word frequencies for sequences in ``SequenceCollection``.
 
         Parameters
@@ -812,7 +628,7 @@ class SequenceCollection(SkbioObject):
         Returns
         -------
         list
-            List of ``collections.defaultdict`` objects, one for each sequence
+            List of ``collections.Counter`` objects, one for each sequence
             in the ``SequenceCollection``, representing the frequency of each
             k-word in each sequence of the ``SequenceCollection``.
 
@@ -823,24 +639,26 @@ class SequenceCollection(SkbioObject):
         Examples
         --------
         >>> from skbio import SequenceCollection, DNA
-        >>> sequences = [DNA('A', id="seq1"),
-        ...              DNA('AT', id="seq2"),
-        ...              DNA('TTTT', id="seq3")]
+        >>> sequences = [DNA('A', metadata={'id': "seq1"}),
+        ...              DNA('AT', metadata={'id': "seq2"}),
+        ...              DNA('TTTT', metadata={'id': "seq3"})]
         >>> s1 = SequenceCollection(sequences)
-        >>> for freqs in s1.k_word_frequencies(1):
+        >>> for freqs in s1.kmer_frequencies(1):
         ...     print(freqs)
-        defaultdict(<type 'float'>, {'A': 1.0})
-        defaultdict(<type 'float'>, {'A': 0.5, 'T': 0.5})
-        defaultdict(<type 'float'>, {'T': 1.0})
-        >>> for freqs in s1.k_word_frequencies(2):
+        Counter({'A': 1})
+        Counter({'A': 1, 'T': 1})
+        Counter({'T': 4})
+        >>> for freqs in s1.kmer_frequencies(2):
         ...     print(freqs)
-        defaultdict(<type 'float'>, {})
-        defaultdict(<type 'float'>, {'AT': 1.0})
-        defaultdict(<type 'float'>, {'TT': 1.0})
+        Counter()
+        Counter({'AT': 1})
+        Counter({'TT': 3})
 
         """
-        return [s.k_word_frequencies(k, overlapping) for s in self]
+        return [s.kmer_frequencies(k, overlap=overlap, relative=relative)
+                for s in self]
 
+    @experimental(as_of="0.4.0")
     def sequence_lengths(self):
         """Return lengths of the sequences in the `SequenceCollection`
 
@@ -853,96 +671,30 @@ class SequenceCollection(SkbioObject):
         --------
         sequence_count
 
-        """
-        return [len(seq) for seq in self]
-
-    def to_fasta(self):
-        """Return fasta-formatted string representing the `SequenceCollection`
-
-        .. note:: Deprecated in scikit-bio 0.2.0-dev
-           ``to_fasta`` will be removed in scikit-bio 0.3.0. It is replaced by
-           ``write``, which is a more general method for serializing
-           FASTA-formatted files. ``write`` supports multiple file formats by
-           taking advantage of scikit-bio's I/O registry system. See
-           :mod:`skbio.io` for more details.
-
-        Returns
-        -------
-        str
-            A fasta-formatted string representing the `SequenceCollection`.
-
-        See Also
-        --------
-        skbio.parse.sequences.parse_fasta
-        """
-        warnings.warn(
-            "SequenceCollection.to_fasta is deprecated and will be removed in "
-            "scikit-bio 0.3.0. Please update your code to use "
-            "SequenceCollection.write.", DeprecationWarning)
-
-        return ''.join([seq.to_fasta() for seq in self._data])
-
-    def toFasta(self):
-        """Return fasta-formatted string representing the `SequenceCollection`
-
-        .. note:: Deprecated in skbio 0.3.0
-                  `SequenceCollection.toFasta` will be removed in skbio 0.2.0,
-                  it is replaced by `SequenceCollection.to_fasta` as the latter
-                  adheres to PEP8 naming conventions. This is necessary to keep
-                  in place now as these objects are sometimes passed into
-                  code that expects a `cogent.alignment.Alignment` object
-                  (e.g., PyNAST), so we need to support the method with this
-                  name.
-
-        Returns
-        -------
-        str
-            A fasta-formatted string representing the `SequenceCollection`.
-
-        """
-        warnings.warn(
-            "SequenceCollection.toFasta() is deprecated. You should use "
-            "SequenceCollection.to_fasta().", DeprecationWarning)
-        return self.to_fasta()
-
-    def upper(self):
-        """Converts all sequences to uppercase
-
-        Returns
-        -------
-        SequenceCollection
-            New `SequenceCollection` object where `BiologicalSequence.upper()`
-            has been called on each sequence.
-
-        See Also
+        Examples
         --------
-        BiologicalSequence.upper
-        lower
-
-        """
-        return self.__class__([seq.upper() for seq in self])
+        >>> from skbio import SequenceCollection
+        >>> from skbio import DNA
+        >>> sequences = [DNA('ACCGT', metadata={'id': "seq1"}),
+        ...              DNA('AACCGGT', metadata={'id': "seq2"})]
+        >>> s1 = SequenceCollection(sequences)
+        >>> print(s1.sequence_lengths())
+        [5, 7]
 
-    def _validate_character_set(self):
-        """Return ``True`` if all sequences are valid, ``False`` otherwise
         """
-        for seq in self:
-            if not seq.is_valid():
-                return False
-        return True
+        return [len(seq) for seq in self]
 
 
 class Alignment(SequenceCollection):
     """Class for storing alignments of biological sequences.
 
-    The ``Alignment`` class adds convenience methods to the
-    ``SequenceCollection`` class to make it easy to work with alignments of
-    biological sequences.
+    The ``Alignment`` class adds methods to the ``SequenceCollection`` class
+    that are useful for working with aligned biological sequences.
 
     Parameters
     ----------
-    seqs : list of `skbio.sequence.BiologicalSequence` objects
-        The `skbio.sequence.BiologicalSequence` objects to load into
-        a new `Alignment` object.
+    seqs : list of `skbio.Sequence` objects
+        The `skbio.Sequence` objects to load into a new `Alignment` object.
     validate : bool, optional
         If True, runs the `is_valid` method after construction and raises
         `SequenceCollectionError` if ``is_valid == False``.
@@ -954,13 +706,13 @@ class Alignment(SequenceCollection):
         if applicable (usually only if the alignment was just constructed using
         a local alignment algorithm). Note that these should be indexes into
         the unaligned sequences, though the `Alignment` object itself doesn't
-        know about these.
+        know about these unless it is degapped.
 
     Raises
     ------
-    skbio.alignment.SequenceCollectionError
+    skbio.SequenceCollectionError
         If ``validate == True`` and ``is_valid == False``.
-    skbio.alignment.AlignmentError
+    skbio.AlignmentError
         If not all the sequences have the same length.
 
     Notes
@@ -971,29 +723,27 @@ class Alignment(SequenceCollection):
 
     See Also
     --------
-    skbio.sequence.BiologicalSequence
-    skbio.sequence.NucleotideSequence
-    skbio.sequence.DNASequence
-    skbio.sequence.RNASequence
+    skbio
+    skbio.DNA
+    skbio.RNA
+    skbio.Protein
     SequenceCollection
-    skbio.parse.sequences
-    skbio.parse.sequences.parse_fasta
 
     Examples
     --------
-    >>> from skbio.alignment import Alignment
-    >>> from skbio.sequence import DNA
-    >>> sequences = [DNA('A--CCGT', id="seq1"),
-    ...              DNA('AACCGGT', id="seq2")]
+    >>> from skbio import Alignment
+    >>> from skbio import DNA
+    >>> sequences = [DNA('A--CCGT', metadata={'id': "seq1"}),
+    ...              DNA('AACCGGT', metadata={'id': "seq2"})]
     >>> a1 = Alignment(sequences)
     >>> a1
     <Alignment: n=2; mean +/- std length=7.00 +/- 0.00>
 
     """
 
-    def __init__(self, seqs, validate=False, score=None,
-                 start_end_positions=None):
-        super(Alignment, self).__init__(seqs, validate)
+    @experimental(as_of="0.4.0")
+    def __init__(self, seqs, score=None, start_end_positions=None):
+        super(Alignment, self).__init__(seqs)
 
         if not self._validate_lengths():
             raise AlignmentError("All sequences need to be of equal length.")
@@ -1002,6 +752,7 @@ class Alignment(SequenceCollection):
             self._score = float(score)
         self._start_end_positions = start_end_positions
 
+    @experimental(as_of="0.4.0")
     def distances(self, distance_fn=None):
         """Compute distances between all pairs of sequences
 
@@ -1009,34 +760,26 @@ class Alignment(SequenceCollection):
         ----------
         distance_fn : function, optional
             Function for computing the distance between a pair of sequences.
-            This must take two sequences as input (as
-            `skbio.sequence.BiologicalSequence` objects) and return a
-            single integer or float value. Defaults to
-            `scipy.spatial.distance.hamming`.
+            This must take two sequences as input (as `skbio.Sequence` objects)
+            and return a single integer or float value. Defaults to the default
+            distance function used by `skbio.Sequence.distance`.
 
         Returns
         -------
         skbio.DistanceMatrix
             Matrix containing the distances between all pairs of sequences.
 
-        Raises
-        ------
-        skbio.util.exception.BiologicalSequenceError
-            If ``len(self) != len(other)`` and ``distance_fn`` ==
-            ``scipy.spatial.distance.hamming``.
-
         See Also
         --------
-        skbio.DistanceMatrix
-        scipy.spatial.distance.hamming
+        skbio.Sequence.distance
 
         Examples
         --------
-        >>> from skbio.alignment import Alignment
-        >>> from skbio.sequence import DNA
-        >>> seqs = [DNA("A-CCGGG", id="s1"),
-        ...         DNA("ATCC--G", id="s2"),
-        ...         DNA("ATCCGGA", id="s3")]
+        >>> from skbio import Alignment
+        >>> from skbio import DNA
+        >>> seqs = [DNA("A-CCGGG", metadata={'id': "s1"}),
+        ...         DNA("ATCC--G", metadata={'id': "s2"}),
+        ...         DNA("ATCCGGA", metadata={'id': "s3"})]
         >>> a1 = Alignment(seqs)
         >>> print(a1.distances())
         3x3 distance matrix
@@ -1050,6 +793,7 @@ class Alignment(SequenceCollection):
         """
         return super(Alignment, self).distances(distance_fn)
 
+    @experimental(as_of="0.4.0")
     def score(self):
         """Returns the score of the alignment.
 
@@ -1068,6 +812,7 @@ class Alignment(SequenceCollection):
         """
         return self._score
 
+    @experimental(as_of="0.4.0")
     def start_end_positions(self):
         """Returns the (start, end) positions for each aligned sequence.
 
@@ -1096,6 +841,7 @@ class Alignment(SequenceCollection):
         """
         return self._start_end_positions
 
+    @experimental(as_of="0.4.0")
     def subalignment(self, seqs_to_keep=None, positions_to_keep=None,
                      invert_seqs_to_keep=False,
                      invert_positions_to_keep=False):
@@ -1125,11 +871,11 @@ class Alignment(SequenceCollection):
 
         Examples
         --------
-        >>> from skbio.alignment import Alignment
-        >>> from skbio.sequence import DNA
-        >>> seqs = [DNA("A-CCGGG", id="s1"),
-        ...         DNA("ATCC--G", id="s2"),
-        ...         DNA("ATCCGGA", id="s3")]
+        >>> from skbio import Alignment
+        >>> from skbio import DNA
+        >>> seqs = [DNA("A-CCGGG", metadata={'id': "s1"}),
+        ...         DNA("ATCC--G", metadata={'id': "s2"}),
+        ...         DNA("ATCCGGA", metadata={'id': "s3"})]
         >>> a1 = Alignment(seqs)
         >>> a1
         <Alignment: n=3; mean +/- std length=7.00 +/- 0.00>
@@ -1211,7 +957,7 @@ class Alignment(SequenceCollection):
         # iterate over sequences
         for sequence_index, seq in enumerate(self):
             # determine if we're keeping the current sequence
-            if keep_seq(sequence_index, seq.id):
+            if keep_seq(sequence_index, seq.metadata['id']):
                 # slice the current sequence with the indices
                 result.append(seq[indices])
             # if we're not keeping the current sequence, move on to the next
@@ -1221,6 +967,7 @@ class Alignment(SequenceCollection):
         # and return it
         return self.__class__(result)
 
+    @experimental(as_of="0.4.0")
     def iter_positions(self, constructor=None):
         """Generator of Alignment positions (i.e., columns)
 
@@ -1229,10 +976,10 @@ class Alignment(SequenceCollection):
         constructor : type, optional
             Constructor function for creating the positional values. By
             default, these will be the same type as corresponding
-            `skbio.sequence.BiologicalSequence` in the `Alignment` object, but
-            you can pass a `skbio.sequence.BiologicalSequence` class here to
-            ensure that they are all of consistent type, or ``str`` to have
-            them returned as strings.
+            `skbio.Sequence` in the `Alignment` object, but
+            you can pass a `skbio.Sequence` class here to ensure that they are
+            all of consistent type, or ``str`` to have them returned as
+            strings.
 
         Returns
         -------
@@ -1246,23 +993,38 @@ class Alignment(SequenceCollection):
 
         Examples
         --------
-        >>> from skbio.alignment import Alignment
-        >>> from skbio.sequence import DNA
-        >>> sequences = [DNA('ACCGT--', id="seq1"),
-        ...              DNA('AACCGGT', id="seq2")]
-        >>> a1 = Alignment(sequences)
-        >>> for position in a1.iter_positions():
-        ...     print(position)
-        [<DNASequence: A (length: 1)>, <DNASequence: A (length: 1)>]
-        [<DNASequence: C (length: 1)>, <DNASequence: A (length: 1)>]
-        [<DNASequence: C (length: 1)>, <DNASequence: C (length: 1)>]
-        [<DNASequence: G (length: 1)>, <DNASequence: C (length: 1)>]
-        [<DNASequence: T (length: 1)>, <DNASequence: G (length: 1)>]
-        [<DNASequence: - (length: 1)>, <DNASequence: G (length: 1)>]
-        [<DNASequence: - (length: 1)>, <DNASequence: T (length: 1)>]
-
-        >>> for position in a1.iter_positions(constructor=str):
-        ...     print(position)
+        >>> from skbio import DNA, Alignment
+        >>> sequences = [DNA('ACCGT--', metadata={'id': "seq1"}),
+        ...              DNA('AACCGGT', metadata={'id': "seq2"})]
+        >>> aln = Alignment(sequences)
+        >>> for position in aln.iter_positions():
+        ...     for seq in position:
+        ...         print(seq.metadata['id'], seq)
+        ...     print('')
+        seq1 A
+        seq2 A
+        <BLANKLINE>
+        seq1 C
+        seq2 A
+        <BLANKLINE>
+        seq1 C
+        seq2 C
+        <BLANKLINE>
+        seq1 G
+        seq2 C
+        <BLANKLINE>
+        seq1 T
+        seq2 G
+        <BLANKLINE>
+        seq1 -
+        seq2 G
+        <BLANKLINE>
+        seq1 -
+        seq2 T
+        <BLANKLINE>
+
+        >>> for position in aln.iter_positions(constructor=str):
+        ...     position
         ['A', 'A']
         ['C', 'A']
         ['C', 'C']
@@ -1279,30 +1041,20 @@ class Alignment(SequenceCollection):
             position = [constructor(seq[i]) for seq in self]
             yield position
 
-    def majority_consensus(self, constructor=None):
-        """Return the majority consensus sequence for the `Alignment`
-
-        .. note:: `constructor` parameter deprecated in scikit-bio 0.2.0-dev
-           `constructor` parameter will be removed in scikit-bio 0.3.0 as its
-           most common use is to convert to ``str``, and this functionality is
-           already accessible by calling ``str`` on the returned
-           ``BiologicalSequence`` (e.g., ``str(seq)``).
-
-        Parameters
-        ----------
-        constructor : function, optional
-            Constructor function for creating the consensus sequence. By
-            default, this will be the same type as the first sequence in the
-            `Alignment`.
+    @experimental(as_of="0.4.0")
+    def majority_consensus(self):
+        """Return the majority consensus sequence for the alignment.
 
         Returns
         -------
-        skbio.sequence.BiologicalSequence
+        skbio.Sequence
             The consensus sequence of the `Alignment`. In other words, at each
             position the most common character is chosen, and those characters
             are combined to create a new sequence. The sequence will not have
-            its ID, description, or quality set; only the consensus sequence
-            will be set.
+            its ID, description, or quality set; only the sequence will be set.
+            The type of biological sequence that is returned will be the same
+            type as the first sequence in the alignment, or ``Sequence`` if the
+            alignment is empty.
 
         Notes
         -----
@@ -1312,42 +1064,37 @@ class Alignment(SequenceCollection):
 
         Examples
         --------
-        >>> from skbio.alignment import Alignment
-        >>> from skbio.sequence import DNA
-        >>> sequences = [DNA('AC--', id="seq1"),
-        ...              DNA('AT-C', id="seq2"),
-        ...              DNA('TT-C', id="seq3")]
+        >>> from skbio import Alignment
+        >>> from skbio import DNA
+        >>> sequences = [DNA('AC--', metadata={'id': "seq1"}),
+        ...              DNA('AT-C', metadata={'id': "seq2"}),
+        ...              DNA('TT-C', metadata={'id': "seq3"})]
         >>> a1 = Alignment(sequences)
         >>> a1.majority_consensus()
-        <DNASequence: AT-C (length: 4)>
+        DNA
+        -----------------------------
+        Stats:
+            length: 4
+            has gaps: True
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 33.33%
+        -----------------------------
+        0 AT-C
 
         """
-        # handle empty Alignment case
         if self.is_empty():
-            return ''
-
-        if constructor is None:
-            constructor = self[0].__class__
+            seq_constructor = Sequence
         else:
-            warnings.warn(
-                "constructor parameter in Alignment.majority_consensus is "
-                "deprecated and will be removed in scikit-bio 0.3.0. Please "
-                "update your code to construct the desired object from the "
-                "BiologicalSequence (or subclass) that is returned by this "
-                "method.", DeprecationWarning)
-
-        result = []
-        for c in self.position_counters():
-            # Counter.most_common returns an ordered list of the
-            # n most common (sequence, count) items in Counter. Here
-            # we set n=1, and take only the character, not the count.
-            result.append(c.most_common(1)[0][0])
+            seq_constructor = self[0].__class__
 
-        # TODO when constructor parameter is removed, this join call can be
-        # removed
-        result = ''.join(result)
-        return constructor(result)
+        # Counter.most_common returns an ordered list of the n most common
+        # (sequence, count) items in Counter. Here we set n=1, and take only
+        # the character, not the count.
+        return seq_constructor(''.join(c.most_common(1)[0][0]
+                               for c in self.position_counters()))
 
+    @experimental(as_of="0.4.0")
     def omit_gap_positions(self, maximum_gap_frequency):
         """Returns Alignment with positions filtered based on gap frequency
 
@@ -1367,11 +1114,11 @@ class Alignment(SequenceCollection):
 
         Examples
         --------
-        >>> from skbio.alignment import Alignment
-        >>> from skbio.sequence import DNA
-        >>> sequences = [DNA('AC--', id="seq1"),
-        ...              DNA('AT-C', id="seq2"),
-        ...              DNA('TT-C', id="seq3")]
+        >>> from skbio import Alignment
+        >>> from skbio import DNA
+        >>> sequences = [DNA('AC--', metadata={'id': "seq1"}),
+        ...              DNA('AT-C', metadata={'id': "seq2"}),
+        ...              DNA('TT-C', metadata={'id': "seq3"})]
         >>> a1 = Alignment(sequences)
         >>> a2 = a1.omit_gap_positions(0.50)
         >>> a2
@@ -1389,15 +1136,16 @@ class Alignment(SequenceCollection):
             return self.__class__([])
 
         position_frequencies = self.position_frequencies()
-        gap_alphabet = self[0].gap_alphabet()
+        gap_chars = self[0].gap_chars
 
         positions_to_keep = []
         for i, f in enumerate(position_frequencies):
-            gap_frequency = sum([f[c] for c in gap_alphabet])
+            gap_frequency = sum([f[c] for c in gap_chars])
             if gap_frequency <= maximum_gap_frequency:
                 positions_to_keep.append(i)
         return self.subalignment(positions_to_keep=positions_to_keep)
 
+    @experimental(as_of="0.4.0")
     def omit_gap_sequences(self, maximum_gap_frequency):
         """Returns Alignment with sequences filtered based on gap frequency
 
@@ -1417,11 +1165,11 @@ class Alignment(SequenceCollection):
 
         Examples
         --------
-        >>> from skbio.alignment import Alignment
-        >>> from skbio.sequence import DNA
-        >>> sequences = [DNA('AC--', id="seq1"),
-        ...              DNA('AT-C', id="seq2"),
-        ...              DNA('TT-C', id="seq3")]
+        >>> from skbio import Alignment
+        >>> from skbio import DNA
+        >>> sequences = [DNA('AC--', metadata={'id': "seq1"}),
+        ...              DNA('AT-C', metadata={'id': "seq2"}),
+        ...              DNA('TT-C', metadata={'id': "seq3"})]
         >>> a1 = Alignment(sequences)
         >>> a2 = a1.omit_gap_sequences(0.49)
         >>> a2
@@ -1436,17 +1184,18 @@ class Alignment(SequenceCollection):
         if self.is_empty():
             return self.__class__([])
 
-        base_frequencies = self.k_word_frequencies(k=1)
-        gap_alphabet = self[0].gap_alphabet()
+        base_frequencies = self.kmer_frequencies(k=1, relative=True)
+        gap_chars = self[0].gap_chars
         seqs_to_keep = []
         for seq, f in zip(self, base_frequencies):
-            gap_frequency = sum([f[c] for c in gap_alphabet])
+            gap_frequency = sum([f[c] for c in gap_chars])
             if gap_frequency <= maximum_gap_frequency:
-                seqs_to_keep.append(seq.id)
+                seqs_to_keep.append(seq.metadata['id'])
         return self.subalignment(seqs_to_keep=seqs_to_keep)
 
+    @experimental(as_of="0.4.0")
     def position_counters(self):
-        """Return collections.Counter object for positions in Alignment
+        """Return counts of characters at each position in the alignment
 
         Returns
         -------
@@ -1461,11 +1210,11 @@ class Alignment(SequenceCollection):
 
         Examples
         --------
-        >>> from skbio.alignment import Alignment
-        >>> from skbio.sequence import DNA
-        >>> sequences = [DNA('AC--', id="seq1"),
-        ...              DNA('AT-C', id="seq2"),
-        ...              DNA('TT-C', id="seq3")]
+        >>> from skbio import Alignment
+        >>> from skbio import DNA
+        >>> sequences = [DNA('AC--', metadata={'id': "seq1"}),
+        ...              DNA('AT-C', metadata={'id': "seq2"}),
+        ...              DNA('TT-C', metadata={'id': "seq3"})]
         >>> a1 = Alignment(sequences)
         >>> for counter in a1.position_counters():
         ...     print(counter)
@@ -1477,6 +1226,7 @@ class Alignment(SequenceCollection):
         """
         return [Counter(p) for p in self.iter_positions(constructor=str)]
 
+    @experimental(as_of="0.4.0")
     def position_frequencies(self):
         """Return frequencies of characters for positions in Alignment
 
@@ -1491,15 +1241,15 @@ class Alignment(SequenceCollection):
         --------
         position_counters
         position_entropies
-        k_word_frequencies
+        kmer_frequencies
 
         Examples
         --------
-        >>> from skbio.alignment import Alignment
-        >>> from skbio.sequence import DNA
-        >>> sequences = [DNA('AC--', id="seq1"),
-        ...              DNA('AT-C', id="seq2"),
-        ...              DNA('TT-C', id="seq3")]
+        >>> from skbio import Alignment
+        >>> from skbio import DNA
+        >>> sequences = [DNA('AC--', metadata={'id': "seq1"}),
+        ...              DNA('AT-C', metadata={'id': "seq2"}),
+        ...              DNA('TT-C', metadata={'id': "seq3"})]
         >>> a1 = Alignment(sequences)
         >>> position_freqs = a1.position_frequencies()
         >>> round(position_freqs[0]['A'], 3)
@@ -1517,6 +1267,7 @@ class Alignment(SequenceCollection):
             result.append(freqs)
         return result
 
+    @experimental(as_of="0.4.0")
     def position_entropies(self, base=None,
                            nan_on_non_standard_chars=True):
         """Return Shannon entropy of positions in Alignment
@@ -1524,10 +1275,10 @@ class Alignment(SequenceCollection):
         Parameters
         ----------
         base : float, optional
-            log base for entropy calculation. If not passed, default will be e
+            Log base for entropy calculation. If not passed, default will be e
             (i.e., natural log will be computed).
         nan_on_non_standard_chars : bool, optional
-            if True, the entropy at positions containing characters outside of
+            If True, the entropy at positions containing characters outside of
             the first sequence's `iupac_standard_characters` will be `np.nan`.
             This is useful, and the default behavior, as it's not clear how a
             gap or degenerate character should contribute to a positional
@@ -1556,14 +1307,15 @@ class Alignment(SequenceCollection):
 
         Examples
         --------
-        >>> from skbio.alignment import Alignment
-        >>> from skbio.sequence import DNA
-        >>> sequences = [DNA('AC--', id="seq1"),
-        ...              DNA('AT-C', id="seq2"),
-        ...              DNA('TT-C', id="seq3")]
+        >>> from skbio import Alignment
+        >>> from skbio import DNA
+        >>> sequences = [DNA('AA--', metadata={'id': "seq1"}),
+        ...              DNA('AC-C', metadata={'id': "seq2"}),
+        ...              DNA('AT-C', metadata={'id': "seq3"}),
+        ...              DNA('TG-C', metadata={'id': "seq4"})]
         >>> a1 = Alignment(sequences)
         >>> print(a1.position_entropies())
-        [0.63651416829481278, 0.63651416829481278, nan, nan]
+        [0.56233514461880829, 1.3862943611198906, nan, nan]
 
         """
         result = []
@@ -1571,7 +1323,7 @@ class Alignment(SequenceCollection):
         if self.is_empty():
             return result
 
-        iupac_standard_characters = self[0].iupac_standard_characters()
+        iupac_standard_characters = self[0].nondegenerate_chars
         for f in self.position_frequencies():
             if (nan_on_non_standard_chars and
                     len(viewkeys(f) - iupac_standard_characters) > 0):
@@ -1580,6 +1332,7 @@ class Alignment(SequenceCollection):
                 result.append(entropy(list(f.values()), base=base))
         return result
 
+    @experimental(as_of="0.4.0")
     def sequence_length(self):
         """Return the number of positions in Alignment
 
@@ -1595,11 +1348,11 @@ class Alignment(SequenceCollection):
 
         Examples
         --------
-        >>> from skbio.alignment import Alignment
-        >>> from skbio.sequence import DNA
-        >>> sequences = [DNA('AC--', id="seq1"),
-        ...              DNA('AT-C', id="seq2"),
-        ...              DNA('TT-C', id="seq3")]
+        >>> from skbio import Alignment
+        >>> from skbio import DNA
+        >>> sequences = [DNA('AC--', metadata={'id': "seq1"}),
+        ...              DNA('AT-C', metadata={'id': "seq2"}),
+        ...              DNA('TT-C', metadata={'id': "seq3"})]
         >>> a1 = Alignment(sequences)
         >>> a1.sequence_length()
         4
@@ -1611,59 +1364,6 @@ class Alignment(SequenceCollection):
         else:
             return len(self._data[0])
 
-    def to_phylip(self, map_labels=False, label_prefix=""):
-        """Return phylip-formatted string representing the `SequenceCollection`
-
-        .. note:: Deprecated in scikit-bio 0.2.0-dev
-           ``Alignment.to_phylip`` will be removed in scikit-bio 0.3.0. It is
-           replaced by ``Alignment.write``, which is a more general method for
-           serializing alignments. ``Alignment.write`` supports multiple file
-           formats by taking advantage of scikit-bio's I/O registry system. See
-           :mod:`skbio.io` for more details.
-
-        Returns
-        -------
-        str
-            A phylip-formatted string representing the `Alignment`.
-
-        See Also
-        --------
-        write
-
-        """
-        warnings.warn(
-            "Alignment.to_phylip is deprecated and will be removed in "
-            "scikit-bio 0.3.0. Please update your code to use "
-            "Alignment.write.", DeprecationWarning)
-
-        if self.is_empty():
-            raise SequenceCollectionError("PHYLIP-formatted string can only "
-                                          "be generated if there is at least "
-                                          "one sequence in the Alignment.")
-
-        sequence_length = self.sequence_length()
-        if sequence_length == 0:
-            raise SequenceCollectionError("PHYLIP-formatted string can only "
-                                          "be generated if there is at least "
-                                          "one position in the Alignment.")
-
-        ids = self.ids()
-        sequence_count = self.sequence_count()
-        result = ["%d %d" % (sequence_count, sequence_length)]
-        if map_labels:
-            _, new_id_to_old_id = self.update_ids(prefix=label_prefix)
-            old_id_to_new_id = {v: k for k, v in new_id_to_old_id.items()}
-        else:
-            new_id_to_old_id = {seq_id: seq_id for seq_id in ids}
-            old_id_to_new_id = new_id_to_old_id
-
-        for seq_id in ids:
-            new_id = old_id_to_new_id[seq_id]
-            seq = self[seq_id]
-            result.append("%s %s" % (new_id, str(seq)))
-
-        return '\n'.join(result), new_id_to_old_id
-
     def _validate_lengths(self):
         """Return ``True`` if all sequences same length, ``False`` otherwise
         """
@@ -1672,424 +1372,3 @@ class Alignment(SequenceCollection):
             if seq1_length != len(seq):
                 return False
         return True
-
-
-class StockholmAlignment(Alignment):
-    """Contains the metadata information in a Stockholm file alignment
-
-    Parameters
-    ----------
-    seqs : list of `skbio.sequence.BiologicalSequence` objects
-        The `skbio.sequence.BiologicalSequence` objects to load.
-    gf : dict, optional
-        GF info in the format {feature: info}
-    gs : dict of dicts, optional
-        GS info in the format {feature: {seqlabel: info}}
-    gr : dict of dicts, optional
-        GR info in the format {feature: {seqlabel: info}}
-    gc : dict, optional
-        GC info in the format {feature: info}
-
-    Notes
-    -----
-    The Stockholm format is described in [1]_ and [2]_.
-
-    If there are multiple references, include information for each R* line
-    as a list, with reference 0 information in position 0 for all lists,
-    etc. This list will be broken up into the appropriate bits for each
-    reference on string formatting.
-
-    If there are multiple trees included, use a list to store identifiers
-    and trees, with position 0 holding identifier for tree in position 0,
-    etc.
-
-    References
-    ----------
-    .. [1] http://sonnhammer.sbc.su.se/Stockholm.html
-    .. [2] http://en.wikipedia.org/wiki/Stockholm_format
-
-    Examples
-    --------
-    Assume we have a basic stockholm file with the following contents::
-
-        # STOCKHOLM 1.0
-        seq1         ACC--G-GGGU
-        seq2         TCC--G-GGGA
-        #=GC SS_cons (((.....)))
-        //
-
-    >>> from skbio.sequence import RNA
-    >>> from skbio.alignment import StockholmAlignment
-    >>> from StringIO import StringIO
-    >>> sto_in = StringIO("# STOCKHOLM 1.0\\n"
-    ...                   "seq1     ACC--G-GGGU\\nseq2     TCC--G-GGGA\\n"
-    ...                   "#=GC SS_cons (((.....)))\\n//")
-    >>> sto_records = StockholmAlignment.from_file(sto_in, RNA)
-    >>> sto = next(sto_records)
-    >>> print(sto)
-    # STOCKHOLM 1.0
-    seq1          ACC--G-GGGU
-    seq2          TCC--G-GGGA
-    #=GC SS_cons  (((.....)))
-    //
-    >>> sto.gc
-    {'SS_cons': '(((.....)))'}
-
-    We can also write out information by instantiating the StockholmAlignment
-    object and then printing it.
-
-    >>> from skbio.sequence import RNA
-    >>> from skbio.alignment import StockholmAlignment
-    >>> seqs = [RNA("ACC--G-GGGU", id="seq1"),
-    ...     RNA("TCC--G-GGGA", id="seq2")]
-    >>> gf = {
-    ... "RT": ["TITLE1",  "TITLE2"],
-    ... "RA": ["Auth1;", "Auth2;"],
-    ... "RL": ["J Mol Biol", "Cell"],
-    ... "RM": ["11469857", "12007400"]}
-    >>> sto = StockholmAlignment(seqs, gf=gf)
-    >>> print(sto)
-    # STOCKHOLM 1.0
-    #=GF RN [1]
-    #=GF RM 11469857
-    #=GF RT TITLE1
-    #=GF RA Auth1;
-    #=GF RL J Mol Biol
-    #=GF RN [2]
-    #=GF RM 12007400
-    #=GF RT TITLE2
-    #=GF RA Auth2;
-    #=GF RL Cell
-    seq1          ACC--G-GGGU
-    seq2          TCC--G-GGGA
-    //
-    """
-    def __init__(self, seqs, gf=None, gs=None, gr=None, gc=None,
-                 validate=False):
-        self.gf = gf if gf else {}
-        self.gs = gs if gs else {}
-        self.gr = gr if gr else {}
-        self.gc = gc if gc else {}
-        super(StockholmAlignment, self).__init__(seqs, validate)
-
-    def __str__(self):
-        """Parses StockholmAlignment into a string with stockholm format
-
-        Returns
-        -------
-        str
-            Stockholm formatted string containing all information in the object
-
-        Notes
-        -----
-        If references are included in GF data, the RN lines are automatically
-        generated if not provided.
-
-        """
-
-        # find length of leader info needed to make file pretty
-        # 10 comes from the characters for '#=GF ' and the feature after label
-        infolen = max(len(seq.id) for seq in self._data) + 10
-
-        GF_lines = []
-        GS_lines = []
-        GC_lines = []
-        # NOTE: EVERYTHING MUST BE COERECED TO STR in case int or float passed
-        # add GF information if applicable
-        if self.gf:
-            skipfeatures = set(("NH", "RC", "RM", "RN", "RA", "RL"))
-            for feature, value in self.gf.items():
-                # list of features to skip and parse special later
-                if feature in skipfeatures:
-                    continue
-                # list of features to parse special
-                elif feature == "TN":
-                    # trees must be in proper order of identifier then tree
-                    ident = value if isinstance(value, list) else [value]
-                    tree = self.gf["NH"] if isinstance(self.gf["NH"], list) \
-                        else [self.gf["NH"]]
-                    for ident, tree in zip(self.gf["TN"], self.gf["NH"]):
-                        GF_lines.append(' '.join(["#=GF", "TN", str(ident)]))
-                        GF_lines.append(' '.join(["#=GF", "NH", str(tree)]))
-                elif feature == "RT":
-                    # make sure each reference block stays together
-                    # set up lists to zip in case some bits are missing
-                    # create rn list if needed
-                    default_none = [0]*len(value)
-                    rn = self.gf.get("RN", ["[%i]" % x for x in
-                                     range(1, len(value)+1)])
-                    rm = self.gf.get("RM", default_none)
-                    rt = self.gf.get("RT", default_none)
-                    ra = self.gf.get("RA", default_none)
-                    rl = self.gf.get("RL", default_none)
-                    rc = self.gf.get("RC", default_none)
-                    # order: RN, RM, RT, RA, RL, RC
-                    for n, m, t, a, l, c in zip(rn, rm, rt, ra, rl, rc):
-                        GF_lines.append(' '.join(["#=GF", "RN", n]))
-                        if m:
-                            GF_lines.append(' '.join(["#=GF", "RM", str(m)]))
-                        if t:
-                            GF_lines.append(' '.join(["#=GF", "RT", str(t)]))
-                        if a:
-                            GF_lines.append(' '.join(["#=GF", "RA", str(a)]))
-                        if l:
-                            GF_lines.append(' '.join(["#=GF", "RL", str(l)]))
-                        if c:
-                            GF_lines.append(' '.join(["#=GF", "RC", str(c)]))
-                else:
-                    # normal addition for everything else
-                    if not isinstance(value, list):
-                        value = [value]
-                    for val in value:
-                        GF_lines.append(' '.join(["#=GF", feature, str(val)]))
-
-        # add GS information if applicable
-        if self.gs:
-            for feature in self.gs:
-                for seqname in self.gs[feature]:
-                    GS_lines.append(' '.join(["#=GS", seqname, feature,
-                                             str(self.gs[feature][seqname])]))
-
-        # add GC information if applicable
-        if self.gc:
-            for feature, value in viewitems(self.gc):
-                leaderinfo = ' '.join(["#=GC", feature])
-                spacer = ' ' * (infolen - len(leaderinfo))
-                GC_lines.append(spacer.join([leaderinfo,
-                                             str(self.gc[feature])]))
-
-        sto_lines = ["# STOCKHOLM 1.0"] + GF_lines + GS_lines
-        # create seq output along with GR info if applicable
-        for label, seq in self.iteritems():
-            spacer = ' ' * (infolen - len(label))
-            sto_lines.append(spacer.join([label, str(seq)]))
-            # GR info added for sequence
-            for feature in viewkeys(self.gr):
-                value = self.gr[feature][label]
-                leaderinfo = ' '.join(['#=GR', label, feature])
-                spacer = ' ' * (infolen - len(leaderinfo))
-                sto_lines.append(spacer.join([leaderinfo, value]))
-
-        sto_lines.extend(GC_lines)
-        # add final slashes to end of file
-        sto_lines.append('//')
-
-        return '\n'.join(sto_lines)
-
-    def to_file(self, out_f):
-        r"""Save the alignment to file in text format.
-
-        Parameters
-        ----------
-        out_f : file-like object or filename
-            File-like object to write serialized data to, or name of
-            file. If it's a file-like object, it must have a ``write``
-            method, and it won't be closed. Else, it is opened and
-            closed after writing.
-
-        See Also
-        --------
-        from_file
-        """
-        with open_file(out_f, 'w') as out_f:
-            out_f.write(self.__str__())
-
-    @staticmethod
-    def _parse_gf_info(lines):
-        """Takes care of parsing GF lines in stockholm plus special cases"""
-        parsed = defaultdict(list)
-        # needed for making each multi-line RT and NH one string
-        rt = []
-        nh = []
-        lastline = ""
-        for line in lines:
-            try:
-                init, feature, content = line.split(None, 2)
-            except ValueError:
-                raise StockholmParseError("Malformed GF line encountered!"
-                                          "\n%s" % line.split(None, 2))
-            if init != "#=GF":
-                raise StockholmParseError("Non-GF line encountered!")
-
-            # take care of adding multiline RT to the parsed information
-            if lastline == "RT" and feature != "RT":
-                # add rt line to the parsed dictionary
-                rtline = " ".join(rt)
-                rt = []
-                parsed["RT"].append(rtline)
-            elif feature == "RT":
-                rt.append(content)
-                lastline = feature
-                continue
-
-            # Take care of adding multiline NH to the parsed dictionary
-            elif lastline == "NH" and feature != "NH":
-                nhline = " ".join(nh)
-                nh = []
-                parsed["NH"].append(nhline)
-            elif feature == "NH":
-                nh.append(content)
-                lastline = feature
-                continue
-
-            # add current feature to the parsed information
-            parsed[feature].append(content)
-            lastline = feature
-
-        # removing unneccessary lists from parsed. Use .items() for py3 support
-        for feature, value in parsed.items():
-            # list of multi-line features to join into single string if needed
-            if feature in ["CC"]:
-                parsed[feature] = ' '.join(value)
-            elif len(parsed[feature]) == 1:
-                parsed[feature] = value[0]
-        return parsed
-
-    @staticmethod
-    def _parse_gc_info(lines, strict=False, seqlen=-1):
-        """Takes care of parsing GC lines in stockholm format"""
-        parsed = {}
-        for line in lines:
-            try:
-                init, feature, content = line.split(None, 2)
-            except ValueError:
-                raise StockholmParseError("Malformed GC line encountered!\n%s"
-                                          % line.split(None, 2))
-            if init != "#=GC":
-                raise StockholmParseError("Non-GC line encountered!")
-
-            # add current feature to the parsed information
-            if feature in parsed:
-                if strict:
-                    raise StockholmParseError("Should not have multiple lines "
-                                              "with the same feature: %s" %
-                                              feature)
-            else:
-                parsed[feature] = [content]
-
-        # removing unneccessary lists from parsed. Use .items() for py3 support
-        for feature, value in parsed.items():
-            parsed[feature] = ''.join(value)
-            if strict:
-                if len(value) != seqlen:
-                    raise StockholmParseError("GC must have exactly one char "
-                                              "per position in alignment!")
-
-        return parsed
-
-    @staticmethod
-    def _parse_gs_gr_info(lines, strict=False, seqlen=-1):
-        """Takes care of parsing GS and GR lines in stockholm format"""
-        parsed = {}
-        parsetype = ""
-        for line in lines:
-            try:
-                init, label, feature, content = line.split(None, 3)
-            except ValueError:
-                raise StockholmParseError("Malformed GS/GR line encountered!"
-                                          "\n%s" % line.split(None, 3))
-            if parsetype == "":
-                parsetype = init
-            elif init != parsetype:
-                    raise StockholmParseError("Non-GS/GR line encountered!")
-
-            # parse each line, taking into account interleaved format
-            if feature in parsed and label in parsed[feature]:
-                # interleaved format, so need list of content
-                parsed[feature][label].append(content)
-            else:
-                parsed[feature] = {label: [content]}
-
-        # join all the crazy lists created during parsing
-        for feature in parsed:
-            for label, content in parsed[feature].items():
-                parsed[feature][label] = ''.join(content)
-                if strict:
-                    if len(parsed[feature][label]) != seqlen:
-                        raise StockholmParseError("GR must have exactly one "
-                                                  "char per position in the "
-                                                  "alignment!")
-        return parsed
-
-    @classmethod
-    def from_file(cls, infile, seq_constructor, strict=False):
-        r"""yields StockholmAlignment objects from a stockholm file.
-
-        Parameters
-        ----------
-        infile : open file object
-            An open stockholm file.
-
-        seq_constructor : BiologicalSequence object
-            The biologicalsequence object that corresponds to what the
-            stockholm file holds. See skbio.sequence
-
-        strict : bool (optional)
-            Turns on strict parsing of GR and GC lines to ensure one char per
-             position. Default: False
-
-        Returns
-        -------
-        Iterator of StockholmAlignment objects
-
-        Raises
-        ------
-        skbio.alignment.StockholmParseError
-            If any lines are found that don't conform to stockholm format
-        """
-        # make sure first line is corect
-        line = infile.readline()
-        if not line.startswith("# STOCKHOLM 1.0"):
-            raise StockholmParseError("Incorrect header found")
-        gs_lines = []
-        gf_lines = []
-        gr_lines = []
-        gc_lines = []
-        # OrderedDict used so sequences maintain same order as in file
-        seqs = OrderedDict()
-        for line in infile:
-            line = line.strip()
-            if line == "" or line.startswith("# S"):
-                # skip blank lines or secondary headers
-                continue
-            elif line == "//":
-                # parse the record since we are at its end
-                # build the seuence list for alignment construction
-                seqs = [seq_constructor(seq, id=_id) for _id, seq in
-                        viewitems(seqs)]
-                # get length of sequences in the alignment
-                seqlen = len(seqs[0][1])
-
-                # parse information lines
-                gf = cls._parse_gf_info(gf_lines)
-                gs = cls._parse_gs_gr_info(gs_lines)
-                gr = cls._parse_gs_gr_info(gr_lines, strict, seqlen)
-                gc = cls._parse_gc_info(gc_lines, strict, seqlen)
-
-                # yield the actual stockholm object
-                yield cls(seqs, gf, gs, gr, gc)
-
-                # reset all storage variables
-                gs_lines = []
-                gf_lines = []
-                gr_lines = []
-                gc_lines = []
-                seqs = OrderedDict()
-            # add the metadata lines to the proper lists
-            elif line.startswith("#=GF"):
-                gf_lines.append(line)
-            elif line.startswith("#=GS"):
-                gs_lines.append(line)
-            elif line.startswith("#=GR"):
-                gr_lines.append(line)
-            elif line.startswith("#=GC"):
-                gc_lines.append(line)
-            else:
-                lineinfo = line.split()
-                # assume sequence since nothing else in format is left
-                # in case of interleaved format, need to do check
-                if lineinfo[0] in seqs:
-                    sequence = seqs[lineinfo[0]]
-                    seqs[lineinfo[0]] = ''.join([sequence, lineinfo[1]])
-                else:
-                    seqs[lineinfo[0]] = lineinfo[1]
diff --git a/skbio/alignment/_exception.py b/skbio/alignment/_exception.py
index 0577b8a..b4c447e 100644
--- a/skbio/alignment/_exception.py
+++ b/skbio/alignment/_exception.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -8,7 +6,7 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from skbio.io import FileFormatError
+from __future__ import absolute_import, division, print_function
 
 
 class SequenceCollectionError(Exception):
@@ -19,8 +17,3 @@ class SequenceCollectionError(Exception):
 class AlignmentError(SequenceCollectionError):
     """General error for alignment validation failures."""
     pass
-
-
-class StockholmParseError(FileFormatError):
-    """Exception raised when a Stockholm formatted file cannot be parsed."""
-    pass
diff --git a/skbio/alignment/_lib/__init__.py b/skbio/alignment/_lib/__init__.py
index 610d868..f3468bd 100644
--- a/skbio/alignment/_lib/__init__.py
+++ b/skbio/alignment/_lib/__init__.py
@@ -6,5 +6,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from numpy.testing import Tester
-test = Tester().test
+from __future__ import absolute_import, division, print_function
+
+from skbio.util import TestRunner
+test = TestRunner(__file__).test
diff --git a/skbio/alignment/_pairwise.py b/skbio/alignment/_pairwise.py
index 9fa3c87..bfe0131 100644
--- a/skbio/alignment/_pairwise.py
+++ b/skbio/alignment/_pairwise.py
@@ -15,8 +15,11 @@ from future.builtins import range, zip
 from six import string_types
 
 from skbio.alignment import Alignment
-from skbio.sequence import BiologicalSequence
+from skbio.alignment._ssw_wrapper import StripedSmithWaterman
+from skbio.sequence import Sequence, Protein
+from skbio.sequence._iupac_sequence import IUPACSequence
 from skbio.util import EfficiencyWarning
+from skbio.util._decorator import experimental, deprecated
 
 # This is temporary: blosum50 does not exist in skbio yet as per
 # issue 161. When the issue is resolved, this should be removed in favor
@@ -121,6 +124,7 @@ blosum50 = \
               'Y': -2, 'X': -1, 'Z': 5}}
 
 
+ at experimental(as_of="0.4.0")
 def local_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
                                     gap_extend_penalty=2,
                                     match_score=2, mismatch_score=-3,
@@ -129,9 +133,9 @@ def local_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
 
     Parameters
     ----------
-    seq1 : str or BiologicalSequence
+    seq1 : str or Sequence
         The first unaligned sequence.
-    seq2 : str or BiologicalSequence
+    seq2 : str or Sequence
         The second unaligned sequence.
     gap_open_penalty : int or float, optional
         Penalty for opening a gap (this is substracted from previous best
@@ -189,6 +193,7 @@ def local_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
                                 gap_extend_penalty, substitution_matrix)
 
 
+ at experimental(as_of="0.4.0")
 def local_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
                                  gap_extend_penalty=1,
                                  substitution_matrix=None):
@@ -196,9 +201,9 @@ def local_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
 
     Parameters
     ----------
-    seq1 : str or BiologicalSequence
+    seq1 : str or Sequence
         The first unaligned sequence.
-    seq2 : str or BiologicalSequence
+    seq2 : str or Sequence
         The second unaligned sequence.
     gap_open_penalty : int or float, optional
         Penalty for opening a gap (this is substracted from previous best
@@ -248,15 +253,16 @@ def local_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
                                 gap_extend_penalty, substitution_matrix)
 
 
+ at experimental(as_of="0.4.0")
 def local_pairwise_align(seq1, seq2, gap_open_penalty,
                          gap_extend_penalty, substitution_matrix):
     """Locally align exactly two seqs with Smith-Waterman
 
     Parameters
     ----------
-    seq1 : str or BiologicalSequence
+    seq1 : str or Sequence
         The first unaligned sequence.
-    seq2 : str or BiologicalSequence
+    seq2 : str or Sequence
         The second unaligned sequence.
     gap_open_penalty : int or float
         Penalty for opening a gap (this is substracted from previous best
@@ -322,6 +328,7 @@ def local_pairwise_align(seq1, seq2, gap_open_penalty,
                      start_end_positions=start_end_positions)
 
 
+ at experimental(as_of="0.4.0")
 def global_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
                                      gap_extend_penalty=2,
                                      match_score=1, mismatch_score=-2,
@@ -331,9 +338,9 @@ def global_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
 
     Parameters
     ----------
-    seq1 : str, BiologicalSequence, or Alignment
+    seq1 : str, Sequence, or Alignment
         The first unaligned sequence(s).
-    seq2 : str, BiologicalSequence, or Alignment
+    seq2 : str, Sequence, or Alignment
         The second unaligned sequence(s).
     gap_open_penalty : int or float, optional
         Penalty for opening a gap (this is substracted from previous best
@@ -402,6 +409,7 @@ def global_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
                                  penalize_terminal_gaps=penalize_terminal_gaps)
 
 
+ at experimental(as_of="0.4.0")
 def global_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
                                   gap_extend_penalty=1,
                                   substitution_matrix=None,
@@ -410,9 +418,9 @@ def global_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
 
     Parameters
     ----------
-    seq1 : str, BiologicalSequence, or Alignment
+    seq1 : str, Sequence, or Alignment
         The first unaligned sequence(s).
-    seq2 : str, BiologicalSequence, or Alignment
+    seq2 : str, Sequence, or Alignment
         The second unaligned sequence(s).
     gap_open_penalty : int or float, optional
         Penalty for opening a gap (this is substracted from previous best
@@ -473,15 +481,16 @@ def global_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
                                  penalize_terminal_gaps=penalize_terminal_gaps)
 
 
+ at experimental(as_of="0.4.0")
 def global_pairwise_align(seq1, seq2, gap_open_penalty, gap_extend_penalty,
                           substitution_matrix, penalize_terminal_gaps=False):
     """Globally align a pair of seqs or alignments with Needleman-Wunsch
 
     Parameters
     ----------
-    seq1 : str, BiologicalSequence, or Alignment
+    seq1 : str, Sequence, or Alignment
         The first unaligned sequence(s).
-    seq2 : str, BiologicalSequence, or Alignment
+    seq2 : str, Sequence, or Alignment
         The second unaligned sequence(s).
     gap_open_penalty : int or float
         Penalty for opening a gap (this is substracted from previous best
@@ -567,6 +576,93 @@ def global_pairwise_align(seq1, seq2, gap_open_penalty, gap_extend_penalty,
                      start_end_positions=start_end_positions)
 
 
+ at experimental(as_of="0.4.0")
+def local_pairwise_align_ssw(sequence1, sequence2, constructor=Sequence,
+                             **kwargs):
+    """Align query and target sequences with Striped Smith-Waterman.
+
+    Parameters
+    ----------
+    sequence1 : str or Sequence
+        The first unaligned sequence
+    sequence2 : str or Sequence
+        The second unaligned sequence
+    constructor : Sequence subclass
+        A constructor to use if `protein` is not True.
+
+    Returns
+    -------
+    ``skbio.alignment.Alignment``
+        The resulting alignment as an Alignment object
+
+    Notes
+    -----
+    This is a wrapper for the SSW package [1]_.
+
+    For a complete list of optional keyword-arguments that can be provided,
+    see ``skbio.alignment.StripedSmithWaterman``.
+
+    The following kwargs will not have any effect: `suppress_sequences` and
+    `zero_index`
+
+    If an alignment does not meet a provided filter, `None` will be returned.
+
+    References
+    ----------
+    .. [1] Zhao, Mengyao, Wan-Ping Lee, Erik P. Garrison, & Gabor T.
+       Marth. "SSW Library: An SIMD Smith-Waterman C/C++ Library for
+       Applications". PLOS ONE (2013). Web. 11 July 2014.
+       http://www.plosone.org/article/info:doi/10.1371/journal.pone.0082138
+
+    See Also
+    --------
+    skbio.alignment.StripedSmithWaterman
+
+    """
+    # We need the sequences for `Alignment` to make sense, so don't let the
+    # user suppress them.
+    kwargs['suppress_sequences'] = False
+    kwargs['zero_index'] = True
+
+    if isinstance(sequence1, Protein):
+        kwargs['protein'] = True
+
+    query = StripedSmithWaterman(str(sequence1), **kwargs)
+    alignment = query(str(sequence2))
+
+    # If there is no cigar, then it has failed a filter. Return None.
+    if not alignment.cigar:
+        return None
+
+    start_end = None
+    if alignment.query_begin != -1:
+        start_end = [
+            (alignment.query_begin, alignment.query_end),
+            (alignment.target_begin, alignment.target_end_optimal)
+        ]
+    if kwargs.get('protein', False):
+        seqs = [
+            Protein(alignment.aligned_query_sequence,
+                    metadata={'id': 'query'}),
+            Protein(alignment.aligned_target_sequence,
+                    metadata={'id': 'target'})
+        ]
+    else:
+        seqs = [
+            constructor(alignment.aligned_query_sequence,
+                        metadata={'id': 'query'}),
+            constructor(alignment.aligned_target_sequence,
+                        metadata={'id': 'target'})
+        ]
+
+    return Alignment(seqs, score=alignment.optimal_alignment_score,
+                     start_end_positions=start_end)
+
+
+ at deprecated(as_of="0.4.0", until="0.4.1",
+            reason="Will be replaced by a SubstitutionMatrix class. To track "
+                   "progress, see [#161]"
+                   "(https://github.com/biocore/scikit-bio/issues/161).")
 def make_identity_substitution_matrix(match_score, mismatch_score,
                                       alphabet='ACGTU'):
     """Generate substitution matrix where all matches are scored equally
@@ -590,13 +686,6 @@ def make_identity_substitution_matrix(match_score, mismatch_score,
         score.
 
     """
-
-    warn("make_identity_substitution_matrix is deprecated and will soon be "
-         "replaced, though at the time of this writing the new name has not "
-         "been finalized. Updates will be posted to issue #161: "
-         "https://github.com/biocore/scikit-bio/issues/161",
-         DeprecationWarning)
-
     result = {}
     for c1 in alphabet:
         row = {}
@@ -618,9 +707,14 @@ def _coerce_alignment_input_type(seq, disallow_alignment):
     """ Converts variety of types into an skbio.Alignment object
     """
     if isinstance(seq, string_types):
-        return Alignment([BiologicalSequence(seq)])
-    elif isinstance(seq, BiologicalSequence):
-        return Alignment([seq])
+        return Alignment([Sequence(seq, metadata={'id': ''})])
+    elif isinstance(seq, Sequence):
+        if 'id' in seq.metadata:
+            return Alignment([seq])
+        else:
+            seq = seq.copy()
+            seq.metadata['id'] = ''
+            return Alignment([seq])
     elif isinstance(seq, Alignment):
         if disallow_alignment:
             # This will disallow aligning either a pair of alignments, or an
@@ -641,13 +735,9 @@ _traceback_encoding = {'match': 1, 'vertical-gap': 2, 'horizontal-gap': 3,
 
 
 def _get_seq_id(seq, default_id):
-    try:
-        result = seq.id
-    except AttributeError:
+    result = seq.metadata['id'] if 'id' in seq.metadata else default_id
+    if result is None or result.strip() == "":
         result = default_id
-    else:
-        if result is None or result.strip() == "":
-            result = default_id
     return result
 
 
@@ -707,9 +797,9 @@ def _init_matrices_nw_no_terminal_gap_penalty(
 def _compute_substitution_score(aln1_chars, aln2_chars, substitution_matrix,
                                 gap_substitution_score):
     substitution_score = 0
+    gap_chars = IUPACSequence.gap_chars
     for aln1_char, aln2_char in product(aln1_chars, aln2_chars):
-        if BiologicalSequence.is_gap(aln1_char) or\
-           BiologicalSequence.is_gap(aln2_char):
+        if aln1_char in gap_chars or aln2_char in gap_chars:
                 substitution_score += gap_substitution_score
         else:
             try:
@@ -877,12 +967,14 @@ def _traceback(traceback_matrix, score_matrix, aln1, aln2, start_row,
     for i in range(aln1_sequence_count):
         aligned_seq = ''.join(aligned_seqs1[i][::-1])
         seq_id = _get_seq_id(aln1[i], str(i))
-        aligned_seqs1[i] = BiologicalSequence(aligned_seq, id=seq_id)
+        constructor = aln1[i].__class__
+        aligned_seqs1[i] = constructor(aligned_seq, metadata={'id': seq_id})
 
     for i in range(aln2_sequence_count):
         aligned_seq = ''.join(aligned_seqs2[i][::-1])
         seq_id = _get_seq_id(aln2[i], str(i + aln1_sequence_count))
-        aligned_seqs2[i] = BiologicalSequence(aligned_seq, id=seq_id)
+        constructor = aln2[i].__class__
+        aligned_seqs2[i] = constructor(aligned_seq, metadata={'id': seq_id})
 
     return (aligned_seqs1, aligned_seqs2, best_score,
             current_col, current_row)
diff --git a/skbio/alignment/_ssw_wrapper.c b/skbio/alignment/_ssw_wrapper.c
index 6ede366..0602656 100644
--- a/skbio/alignment/_ssw_wrapper.c
+++ b/skbio/alignment/_ssw_wrapper.c
@@ -1,4 +1,4 @@
-/* Generated by Cython 0.20.2 on Thu Sep  4 14:19:20 2014 */
+/* Generated by Cython 0.22.1 */
 
 #define PY_SSIZE_T_CLEAN
 #ifndef CYTHON_USE_PYLONG_INTERNALS
@@ -16,11 +16,11 @@
 #include "Python.h"
 #ifndef Py_PYTHON_H
     #error Python headers needed to compile C extensions, please install development version of Python.
-#elif PY_VERSION_HEX < 0x02040000
-    #error Cython requires Python 2.4+.
+#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000)
+    #error Cython requires Python 2.6+ or Python 3.2+.
 #else
-#define CYTHON_ABI "0_20_2"
-#include <stddef.h> /* For offsetof */
+#define CYTHON_ABI "0_22_1"
+#include <stddef.h>
 #ifndef offsetof
 #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
 #endif
@@ -54,65 +54,11 @@
 #define CYTHON_COMPILING_IN_PYPY 0
 #define CYTHON_COMPILING_IN_CPYTHON 1
 #endif
-#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600
+#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
 #define Py_OptimizeFlag 0
 #endif
-#if PY_VERSION_HEX < 0x02050000
-  typedef int Py_ssize_t;
-  #define PY_SSIZE_T_MAX INT_MAX
-  #define PY_SSIZE_T_MIN INT_MIN
-  #define PY_FORMAT_SIZE_T ""
-  #define CYTHON_FORMAT_SSIZE_T ""
-  #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
-  #define PyInt_AsSsize_t(o)   __Pyx_PyInt_As_int(o)
-  #define PyNumber_Index(o)    ((PyNumber_Check(o) && !PyFloat_Check(o)) ? PyNumber_Int(o) : \
-                                (PyErr_Format(PyExc_TypeError, \
-                                              "expected index value, got %.200s", Py_TYPE(o)->tp_name), \
-                                 (PyObject*)0))
-  #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \
-                                  !PyComplex_Check(o))
-  #define PyIndex_Check __Pyx_PyIndex_Check
-  #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message)
-  #define __PYX_BUILD_PY_SSIZE_T "i"
-#else
-  #define __PYX_BUILD_PY_SSIZE_T "n"
-  #define CYTHON_FORMAT_SSIZE_T "z"
-  #define __Pyx_PyIndex_Check PyIndex_Check
-#endif
-#if PY_VERSION_HEX < 0x02060000
-  #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)
-  #define Py_TYPE(ob)   (((PyObject*)(ob))->ob_type)
-  #define Py_SIZE(ob)   (((PyVarObject*)(ob))->ob_size)
-  #define PyVarObject_HEAD_INIT(type, size) \
-          PyObject_HEAD_INIT(type) size,
-  #define PyType_Modified(t)
-  typedef struct {
-     void *buf;
-     PyObject *obj;
-     Py_ssize_t len;
-     Py_ssize_t itemsize;
-     int readonly;
-     int ndim;
-     char *format;
-     Py_ssize_t *shape;
-     Py_ssize_t *strides;
-     Py_ssize_t *suboffsets;
-     void *internal;
-  } Py_buffer;
-  #define PyBUF_SIMPLE 0
-  #define PyBUF_WRITABLE 0x0001
-  #define PyBUF_FORMAT 0x0004
-  #define PyBUF_ND 0x0008
-  #define PyBUF_STRIDES (0x0010 | PyBUF_ND)
-  #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES)
-  #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES)
-  #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES)
-  #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES)
-  #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE)
-  #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE)
-  typedef int (*getbufferproc)(PyObject *, Py_buffer *, int);
-  typedef void (*releasebufferproc)(PyObject *, Py_buffer *);
-#endif
+#define __PYX_BUILD_PY_SSIZE_T "n"
+#define CYTHON_FORMAT_SSIZE_T "z"
 #if PY_MAJOR_VERSION < 3
   #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
   #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
@@ -124,23 +70,16 @@
           PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
   #define __Pyx_DefaultClassType PyType_Type
 #endif
-#if PY_VERSION_HEX < 0x02060000
-  #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict")
-#endif
-#if PY_MAJOR_VERSION >= 3
+#ifndef Py_TPFLAGS_CHECKTYPES
   #define Py_TPFLAGS_CHECKTYPES 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_INDEX
   #define Py_TPFLAGS_HAVE_INDEX 0
 #endif
-#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3)
+#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
   #define Py_TPFLAGS_HAVE_NEWBUFFER 0
 #endif
-#if PY_VERSION_HEX < 0x02060000
-  #define Py_TPFLAGS_HAVE_VERSION_TAG 0
-#endif
-#if PY_VERSION_HEX < 0x02060000 && !defined(Py_TPFLAGS_IS_ABSTRACT)
-  #define Py_TPFLAGS_IS_ABSTRACT 0
-#endif
-#if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE)
+#ifndef Py_TPFLAGS_HAVE_FINALIZE
   #define Py_TPFLAGS_HAVE_FINALIZE 0
 #endif
 #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
@@ -164,12 +103,17 @@
 #if CYTHON_COMPILING_IN_PYPY
   #define __Pyx_PyUnicode_Concat(a, b)      PyNumber_Add(a, b)
   #define __Pyx_PyUnicode_ConcatSafe(a, b)  PyNumber_Add(a, b)
+  #define __Pyx_PyFrozenSet_Size(s)         PyObject_Size(s)
 #else
   #define __Pyx_PyUnicode_Concat(a, b)      PyUnicode_Concat(a, b)
   #define __Pyx_PyUnicode_ConcatSafe(a, b)  ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \
       PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
+  #define __Pyx_PyFrozenSet_Size(s)         PySet_Size(s)
 #endif
-#define __Pyx_PyString_FormatSafe(a, b)  ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
+  #define PyUnicode_Contains(u, s)  PySequence_Contains(u, s)
+#endif
+#define __Pyx_PyString_FormatSafe(a, b)   ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
 #define __Pyx_PyUnicode_FormatSafe(a, b)  ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
 #if PY_MAJOR_VERSION >= 3
   #define __Pyx_PyString_Format(a, b)  PyUnicode_Format(a, b)
@@ -183,36 +127,13 @@
   #define PyString_Check               PyUnicode_Check
   #define PyString_CheckExact          PyUnicode_CheckExact
 #endif
-#if PY_VERSION_HEX < 0x02060000
-  #define PyBytesObject                PyStringObject
-  #define PyBytes_Type                 PyString_Type
-  #define PyBytes_Check                PyString_Check
-  #define PyBytes_CheckExact           PyString_CheckExact
-  #define PyBytes_FromString           PyString_FromString
-  #define PyBytes_FromStringAndSize    PyString_FromStringAndSize
-  #define PyBytes_FromFormat           PyString_FromFormat
-  #define PyBytes_DecodeEscape         PyString_DecodeEscape
-  #define PyBytes_AsString             PyString_AsString
-  #define PyBytes_AsStringAndSize      PyString_AsStringAndSize
-  #define PyBytes_Size                 PyString_Size
-  #define PyBytes_AS_STRING            PyString_AS_STRING
-  #define PyBytes_GET_SIZE             PyString_GET_SIZE
-  #define PyBytes_Repr                 PyString_Repr
-  #define PyBytes_Concat               PyString_Concat
-  #define PyBytes_ConcatAndDel         PyString_ConcatAndDel
-#endif
 #if PY_MAJOR_VERSION >= 3
   #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
   #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
 #else
-  #define __Pyx_PyBaseString_Check(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj) || \
-                                         PyString_Check(obj) || PyUnicode_Check(obj))
+  #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
   #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
 #endif
-#if PY_VERSION_HEX < 0x02060000
-  #define PySet_Check(obj)             PyObject_TypeCheck(obj, &PySet_Type)
-  #define PyFrozenSet_Check(obj)       PyObject_TypeCheck(obj, &PyFrozenSet_Type)
-#endif
 #ifndef PySet_CheckExact
   #define PySet_CheckExact(obj)        (Py_TYPE(obj) == &PySet_Type)
 #endif
@@ -237,6 +158,11 @@
 #if PY_MAJOR_VERSION >= 3
   #define PyBoolObject                 PyLongObject
 #endif
+#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
+  #ifndef PyUnicode_InternFromString
+    #define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
+  #endif
+#endif
 #if PY_VERSION_HEX < 0x030200A4
   typedef long Py_hash_t;
   #define __Pyx_PyInt_FromHash_t PyInt_FromLong
@@ -245,42 +171,10 @@
   #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
   #define __Pyx_PyInt_AsHash_t   PyInt_AsSsize_t
 #endif
-#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300)
-  #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b)
-  #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value)
-  #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b)
-#else
-  #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \
-        (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \
-        (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \
-            (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0)))
-  #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \
-        (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
-        (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \
-            (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1)))
-  #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \
-        (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
-        (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \
-            (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1)))
-#endif
 #if PY_MAJOR_VERSION >= 3
-  #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
-#endif
-#if PY_VERSION_HEX < 0x02050000
-  #define __Pyx_GetAttrString(o,n)   PyObject_GetAttrString((o),((char *)(n)))
-  #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a))
-  #define __Pyx_DelAttrString(o,n)   PyObject_DelAttrString((o),((char *)(n)))
+  #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
 #else
-  #define __Pyx_GetAttrString(o,n)   PyObject_GetAttrString((o),(n))
-  #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a))
-  #define __Pyx_DelAttrString(o,n)   PyObject_DelAttrString((o),(n))
-#endif
-#if PY_VERSION_HEX < 0x02050000
-  #define __Pyx_NAMESTR(n) ((char *)(n))
-  #define __Pyx_DOCSTR(n)  ((char *)(n))
-#else
-  #define __Pyx_NAMESTR(n) (n)
-  #define __Pyx_DOCSTR(n)  (n)
+  #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
 #endif
 #ifndef CYTHON_INLINE
   #if defined(__GNUC__)
@@ -316,11 +210,22 @@ static CYTHON_INLINE float __PYX_NAN() {
   return value;
 }
 #endif
+#define __Pyx_void_to_None(void_result) (void_result, Py_INCREF(Py_None), Py_None)
 #ifdef __cplusplus
 template<typename T>
 void __Pyx_call_destructor(T* x) {
     x->~T();
 }
+template<typename T>
+class __Pyx_FakeReference {
+  public:
+    __Pyx_FakeReference() : ptr(NULL) { }
+    __Pyx_FakeReference(T& ref) : ptr(&ref) { }
+    T *operator->() { return ptr; }
+    operator T&() { return *ptr; }
+  private:
+    T *ptr;
+};
 #endif
 
 
@@ -374,8 +279,15 @@ void __Pyx_call_destructor(T* x) {
 #   define CYTHON_UNUSED
 # endif
 #endif
+#ifndef CYTHON_NCP_UNUSED
+# if CYTHON_COMPILING_IN_CPYTHON
+#  define CYTHON_NCP_UNUSED
+# else
+#  define CYTHON_NCP_UNUSED CYTHON_UNUSED
+# endif
+#endif
 typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding;
-                const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
+                const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
 
 #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
@@ -408,11 +320,11 @@ static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
 #endif
 #define __Pyx_PyObject_AsSString(s)    ((signed char*) __Pyx_PyObject_AsString(s))
 #define __Pyx_PyObject_AsUString(s)    ((unsigned char*) __Pyx_PyObject_AsString(s))
-#define __Pyx_PyObject_FromUString(s)  __Pyx_PyObject_FromString((const char*)s)
-#define __Pyx_PyBytes_FromUString(s)   __Pyx_PyBytes_FromString((const char*)s)
-#define __Pyx_PyByteArray_FromUString(s)   __Pyx_PyByteArray_FromString((const char*)s)
-#define __Pyx_PyStr_FromUString(s)     __Pyx_PyStr_FromString((const char*)s)
-#define __Pyx_PyUnicode_FromUString(s) __Pyx_PyUnicode_FromString((const char*)s)
+#define __Pyx_PyObject_FromCString(s)  __Pyx_PyObject_FromString((const char*)s)
+#define __Pyx_PyBytes_FromCString(s)   __Pyx_PyBytes_FromString((const char*)s)
+#define __Pyx_PyByteArray_FromCString(s)   __Pyx_PyByteArray_FromString((const char*)s)
+#define __Pyx_PyStr_FromCString(s)     __Pyx_PyStr_FromString((const char*)s)
+#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
 #if PY_MAJOR_VERSION < 3
 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
 {
@@ -448,7 +360,7 @@ static int __Pyx_init_sys_getdefaultencoding_params(void) {
     const char* default_encoding_c;
     sys = PyImport_ImportModule("sys");
     if (!sys) goto bad;
-    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
+    default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
     Py_DECREF(sys);
     if (!default_encoding) goto bad;
     default_encoding_c = PyBytes_AsString(default_encoding);
@@ -556,7 +468,7 @@ static const char *__pyx_filename;
 
 
 static const char *__pyx_f[] = {
-  "_ssw_wrapper.pyx",
+  "skbio/alignment/_ssw_wrapper.pyx",
   "__init__.pxd",
   "type.pxd",
   "bool.pxd",
@@ -566,12 +478,12 @@ static const char *__pyx_f[] = {
 struct __Pyx_StructField_;
 #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
 typedef struct {
-  const char* name; /* for error messages only */
+  const char* name;
   struct __Pyx_StructField_* fields;
-  size_t size;     /* sizeof(type) */
-  size_t arraysize[8]; /* length of array in each dimension */
+  size_t size;
+  size_t arraysize[8];
   int ndim;
-  char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject, c_H_ar */
+  char typegroup;
   char is_unsigned;
   int flags;
 } __Pyx_TypeInfo;
@@ -598,7 +510,7 @@ typedef struct {
 } __Pyx_BufFmt_Context;
 
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":723
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726
  * # in Cython to enable them only on the right systems.
  * 
  * ctypedef npy_int8       int8_t             # <<<<<<<<<<<<<<
@@ -607,7 +519,7 @@ typedef struct {
  */
 typedef npy_int8 __pyx_t_5numpy_int8_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":724
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":727
  * 
  * ctypedef npy_int8       int8_t
  * ctypedef npy_int16      int16_t             # <<<<<<<<<<<<<<
@@ -616,7 +528,7 @@ typedef npy_int8 __pyx_t_5numpy_int8_t;
  */
 typedef npy_int16 __pyx_t_5numpy_int16_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":725
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":728
  * ctypedef npy_int8       int8_t
  * ctypedef npy_int16      int16_t
  * ctypedef npy_int32      int32_t             # <<<<<<<<<<<<<<
@@ -625,7 +537,7 @@ typedef npy_int16 __pyx_t_5numpy_int16_t;
  */
 typedef npy_int32 __pyx_t_5numpy_int32_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":729
  * ctypedef npy_int16      int16_t
  * ctypedef npy_int32      int32_t
  * ctypedef npy_int64      int64_t             # <<<<<<<<<<<<<<
@@ -634,7 +546,7 @@ typedef npy_int32 __pyx_t_5numpy_int32_t;
  */
 typedef npy_int64 __pyx_t_5numpy_int64_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":730
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733
  * #ctypedef npy_int128     int128_t
  * 
  * ctypedef npy_uint8      uint8_t             # <<<<<<<<<<<<<<
@@ -643,7 +555,7 @@ typedef npy_int64 __pyx_t_5numpy_int64_t;
  */
 typedef npy_uint8 __pyx_t_5numpy_uint8_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":731
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":734
  * 
  * ctypedef npy_uint8      uint8_t
  * ctypedef npy_uint16     uint16_t             # <<<<<<<<<<<<<<
@@ -652,7 +564,7 @@ typedef npy_uint8 __pyx_t_5numpy_uint8_t;
  */
 typedef npy_uint16 __pyx_t_5numpy_uint16_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":732
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":735
  * ctypedef npy_uint8      uint8_t
  * ctypedef npy_uint16     uint16_t
  * ctypedef npy_uint32     uint32_t             # <<<<<<<<<<<<<<
@@ -661,7 +573,7 @@ typedef npy_uint16 __pyx_t_5numpy_uint16_t;
  */
 typedef npy_uint32 __pyx_t_5numpy_uint32_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":736
  * ctypedef npy_uint16     uint16_t
  * ctypedef npy_uint32     uint32_t
  * ctypedef npy_uint64     uint64_t             # <<<<<<<<<<<<<<
@@ -670,7 +582,7 @@ typedef npy_uint32 __pyx_t_5numpy_uint32_t;
  */
 typedef npy_uint64 __pyx_t_5numpy_uint64_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":737
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":740
  * #ctypedef npy_uint128    uint128_t
  * 
  * ctypedef npy_float32    float32_t             # <<<<<<<<<<<<<<
@@ -679,7 +591,7 @@ typedef npy_uint64 __pyx_t_5numpy_uint64_t;
  */
 typedef npy_float32 __pyx_t_5numpy_float32_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":738
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":741
  * 
  * ctypedef npy_float32    float32_t
  * ctypedef npy_float64    float64_t             # <<<<<<<<<<<<<<
@@ -688,7 +600,7 @@ typedef npy_float32 __pyx_t_5numpy_float32_t;
  */
 typedef npy_float64 __pyx_t_5numpy_float64_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":747
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":750
  * # The int types are mapped a bit surprising --
  * # numpy.int corresponds to 'l' and numpy.long to 'q'
  * ctypedef npy_long       int_t             # <<<<<<<<<<<<<<
@@ -697,7 +609,7 @@ typedef npy_float64 __pyx_t_5numpy_float64_t;
  */
 typedef npy_long __pyx_t_5numpy_int_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":748
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751
  * # numpy.int corresponds to 'l' and numpy.long to 'q'
  * ctypedef npy_long       int_t
  * ctypedef npy_longlong   long_t             # <<<<<<<<<<<<<<
@@ -706,7 +618,7 @@ typedef npy_long __pyx_t_5numpy_int_t;
  */
 typedef npy_longlong __pyx_t_5numpy_long_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":749
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":752
  * ctypedef npy_long       int_t
  * ctypedef npy_longlong   long_t
  * ctypedef npy_longlong   longlong_t             # <<<<<<<<<<<<<<
@@ -715,7 +627,7 @@ typedef npy_longlong __pyx_t_5numpy_long_t;
  */
 typedef npy_longlong __pyx_t_5numpy_longlong_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":754
  * ctypedef npy_longlong   longlong_t
  * 
  * ctypedef npy_ulong      uint_t             # <<<<<<<<<<<<<<
@@ -724,7 +636,7 @@ typedef npy_longlong __pyx_t_5numpy_longlong_t;
  */
 typedef npy_ulong __pyx_t_5numpy_uint_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":752
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755
  * 
  * ctypedef npy_ulong      uint_t
  * ctypedef npy_ulonglong  ulong_t             # <<<<<<<<<<<<<<
@@ -733,7 +645,7 @@ typedef npy_ulong __pyx_t_5numpy_uint_t;
  */
 typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":753
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":756
  * ctypedef npy_ulong      uint_t
  * ctypedef npy_ulonglong  ulong_t
  * ctypedef npy_ulonglong  ulonglong_t             # <<<<<<<<<<<<<<
@@ -742,7 +654,7 @@ typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
  */
 typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758
  * ctypedef npy_ulonglong  ulonglong_t
  * 
  * ctypedef npy_intp       intp_t             # <<<<<<<<<<<<<<
@@ -751,7 +663,7 @@ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
  */
 typedef npy_intp __pyx_t_5numpy_intp_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":756
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":759
  * 
  * ctypedef npy_intp       intp_t
  * ctypedef npy_uintp      uintp_t             # <<<<<<<<<<<<<<
@@ -760,7 +672,7 @@ typedef npy_intp __pyx_t_5numpy_intp_t;
  */
 typedef npy_uintp __pyx_t_5numpy_uintp_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":761
  * ctypedef npy_uintp      uintp_t
  * 
  * ctypedef npy_double     float_t             # <<<<<<<<<<<<<<
@@ -769,7 +681,7 @@ typedef npy_uintp __pyx_t_5numpy_uintp_t;
  */
 typedef npy_double __pyx_t_5numpy_float_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":759
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762
  * 
  * ctypedef npy_double     float_t
  * ctypedef npy_double     double_t             # <<<<<<<<<<<<<<
@@ -778,7 +690,7 @@ typedef npy_double __pyx_t_5numpy_float_t;
  */
 typedef npy_double __pyx_t_5numpy_double_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":760
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":763
  * ctypedef npy_double     float_t
  * ctypedef npy_double     double_t
  * ctypedef npy_longdouble longdouble_t             # <<<<<<<<<<<<<<
@@ -811,7 +723,7 @@ typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
 struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure;
 struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":765
  * ctypedef npy_longdouble longdouble_t
  * 
  * ctypedef npy_cfloat      cfloat_t             # <<<<<<<<<<<<<<
@@ -820,7 +732,7 @@ struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman;
  */
 typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":763
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766
  * 
  * ctypedef npy_cfloat      cfloat_t
  * ctypedef npy_cdouble     cdouble_t             # <<<<<<<<<<<<<<
@@ -829,7 +741,7 @@ typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
  */
 typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":764
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":767
  * ctypedef npy_cfloat      cfloat_t
  * ctypedef npy_cdouble     cdouble_t
  * ctypedef npy_clongdouble clongdouble_t             # <<<<<<<<<<<<<<
@@ -838,7 +750,7 @@ typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
  */
 typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":769
  * ctypedef npy_clongdouble clongdouble_t
  * 
  * ctypedef npy_cdouble     complex_t             # <<<<<<<<<<<<<<
@@ -865,7 +777,7 @@ struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure {
 };
 
 
-/* "skbio/alignment/_ssw_wrapper.pyx":416
+/* "skbio/alignment/_ssw_wrapper.pyx":401
  *         return tuples
  * 
  * cdef class StripedSmithWaterman:             # <<<<<<<<<<<<<<
@@ -906,7 +818,7 @@ struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure {
 static struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure;
 
 
-/* "skbio/alignment/_ssw_wrapper.pyx":416
+/* "skbio/alignment/_ssw_wrapper.pyx":401
  *         return tuples
  * 
  * cdef class StripedSmithWaterman:             # <<<<<<<<<<<<<<
@@ -920,6 +832,8 @@ struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman {
   PyArrayObject *(*_convert_dict2d_to_matrix)(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *, PyObject *);
 };
 static struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman;
+
+/* --- Runtime support code (head) --- */
 #ifndef CYTHON_REFNANNY
   #define CYTHON_REFNANNY 0
 #endif
@@ -933,7 +847,7 @@ static struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWate
     void (*FinishContext)(void**);
   } __Pyx_RefNannyAPIStruct;
   static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
-  static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/
+  static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
   #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
 #ifdef WITH_THREAD
   #define __Pyx_RefNannySetupContext(name, acquire_gil) \
@@ -970,7 +884,7 @@ static struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWate
   #define __Pyx_XDECREF(r) Py_XDECREF(r)
   #define __Pyx_XGOTREF(r)
   #define __Pyx_XGIVEREF(r)
-#endif /* CYTHON_REFNANNY */
+#endif
 #define __Pyx_XDECREF_SET(r, v) do {                            \
         PyObject *tmp = (PyObject *) r;                         \
         r = v; __Pyx_XDECREF(tmp);                              \
@@ -997,18 +911,18 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject
 #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
 #endif
 
-static PyObject *__Pyx_GetBuiltinName(PyObject *name); /*proto*/
+static PyObject *__Pyx_GetBuiltinName(PyObject *name);
 
 static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
-    Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/
+    Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
 
-static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/
+static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
 
 static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \
     PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \
-    const char* function_name); /*proto*/
+    const char* function_name);
 
-static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /*proto*/
+static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
 
 #if PY_MAJOR_VERSION < 3
 #define __Pyx_PyString_Join __Pyx_PyBytes_Join
@@ -1024,7 +938,7 @@ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /*proto*/
     #define __Pyx_PyBytes_Join _PyBytes_Join
     #endif
 #else
-static CYTHON_INLINE PyObject* __Pyx_PyBytes_Join(PyObject* sep, PyObject* values); /*proto*/
+static CYTHON_INLINE PyObject* __Pyx_PyBytes_Join(PyObject* sep, PyObject* values);
 #endif
 
 #if CYTHON_COMPILING_IN_CPYTHON
@@ -1044,7 +958,7 @@ static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
 #endif
 
 #if CYTHON_COMPILING_IN_CPYTHON
-static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); /*proto*/
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
 #else
 #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
 #endif
@@ -1070,7 +984,7 @@ static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
 #define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
 #endif
 
-static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /*proto*/
+static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name);
 
 #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
     (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
@@ -1093,19 +1007,31 @@ static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j
 static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
                                                      int is_list, int wraparound, int boundscheck);
 
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
+#endif
+
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
+
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
+#else
+#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
+#endif
+
 static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
 
 static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
 
-static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/
+static CYTHON_INLINE int __Pyx_IterFinish(void);
 
-static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/
+static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected);
 
 #include <string.h>
 
-static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /*proto*/
+static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
 
-static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /*proto*/
+static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
 
 #if PY_MAJOR_VERSION >= 3
 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
@@ -1113,22 +1039,22 @@ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int
 #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
 #endif
 
-static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
-static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
+static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb);
+static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb);
 
-static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
 
-static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/
+static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
 
 static CYTHON_INLINE int  __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
     __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
 static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
 
-static void __Pyx_RaiseBufferFallbackError(void); /*proto*/
+static void __Pyx_RaiseBufferFallbackError(void);
 
 static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* proto */
 
-#if PY_MAJOR_VERSION >= 3
+#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
 static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
     PyObject *value;
     value = PyDict_GetItemWithError(d, key);
@@ -1148,18 +1074,33 @@ static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
     #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
 #endif
 
-static void __Pyx_RaiseBufferIndexError(int axis); /*proto*/
+static void __Pyx_RaiseBufferIndexError(int axis);
 
 #define __Pyx_BufPtrCContig1d(type, buf, i0, s0) ((type)buf + i0)
-static PyObject* __Pyx_PyDict_GetItemDefault(PyObject* d, PyObject* key, PyObject* default_value); /*proto*/
-
 static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
 
-static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/
+static int __Pyx_SetVtable(PyObject *dict, void *vtable);
+
+static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
 
-static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /*proto*/
+static PyObject *__Pyx_GetNameInClass(PyObject *nmspace, PyObject *name);
+
+typedef struct {
+    int code_line;
+    PyCodeObject* code_object;
+} __Pyx_CodeObjectCacheEntry;
+struct __Pyx_CodeObjectCache {
+    int count;
+    int max_count;
+    __Pyx_CodeObjectCacheEntry* entries;
+};
+static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
+static PyCodeObject *__pyx_find_code_object(int code_line);
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
 
-static PyObject *__Pyx_GetNameInClass(PyObject *nmspace, PyObject *name); /*proto*/
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+                               int py_line, const char *filename);
 
 typedef struct {
   Py_ssize_t shape, strides, suboffsets;
@@ -1186,7 +1127,7 @@ typedef struct {
 static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0};
 static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1};
 
-static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /*proto*/
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
 
 static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
 
@@ -1316,29 +1257,16 @@ static int __Pyx_check_binary_version(void);
 #endif
 #endif
 
-static PyObject *__Pyx_ImportModule(const char *name); /*proto*/
-
-static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict);  /*proto*/
-
-typedef struct {
-    int code_line;
-    PyCodeObject* code_object;
-} __Pyx_CodeObjectCacheEntry;
-struct __Pyx_CodeObjectCache {
-    int count;
-    int max_count;
-    __Pyx_CodeObjectCacheEntry* entries;
-};
-static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
-static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
-static PyCodeObject *__pyx_find_code_object(int code_line);
-static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
+static PyObject *__Pyx_ImportModule(const char *name);
 
-static void __Pyx_AddTraceback(const char *funcname, int c_line,
-                               int py_line, const char *filename); /*proto*/
+static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict);
 
-static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
 
+static PyObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure___constructor(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *__pyx_v_self, s_align *__pyx_v_pointer); /* proto*/
+static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman__seq_converter(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_sequence); /* proto*/
+static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman__build_match_matrix(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_match_score, PyObject *__pyx_v_mismatch_score); /* proto*/
+static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman__convert_dict2d_to_matrix(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_dict2d); /* proto*/
 
 /* Module declarations from 'cpython.version' */
 
@@ -1475,7 +1403,6 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
 static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_2__call__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_target_sequence); /* proto */
 static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_4__dealloc__(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self); /* proto */
 static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_6_get_bit_flag(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_self, PyObject *__pyx_v_override_skip_babp, PyObject *__pyx_v_score_only); /* proto */
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_local_pairwise_align_ssw(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_sequence1, PyObject *__pyx_v_sequence2, PyObject *__pyx_v_kwargs); /* proto */
 static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
 static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
 static PyObject *__pyx_tp_new_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
@@ -1506,16 +1433,13 @@ static char __pyx_k__3[] = "...";
 static char __pyx_k__5[] = "\n";
 static char __pyx_k__6[] = "";
 static char __pyx_k__8[] = "-";
-static char __pyx_k_id[] = "id";
 static char __pyx_k_np[] = "np";
 static char __pyx_k_end[] = "end";
-static char __pyx_k_get[] = "get";
 static char __pyx_k_ord[] = "ord";
 static char __pyx_k_r_r[] = "    {!r}: {!r}";
 static char __pyx_k_int8[] = "int8";
 static char __pyx_k_join[] = "join";
 static char __pyx_k_main[] = "__main__";
-static char __pyx_k_seqs[] = "seqs";
 static char __pyx_k_test[] = "__test__";
 static char __pyx_k_ACGTN[] = "ACGTN";
 static char __pyx_k_array[] = "array";
@@ -1524,30 +1448,24 @@ static char __pyx_k_cigar[] = "cigar";
 static char __pyx_k_dtype[] = "dtype";
 static char __pyx_k_empty[] = "empty";
 static char __pyx_k_numpy[] = "numpy";
-static char __pyx_k_query[] = "query";
 static char __pyx_k_range[] = "range";
-static char __pyx_k_score[] = "score";
 static char __pyx_k_format[] = "format";
 static char __pyx_k_import[] = "__import__";
-static char __pyx_k_kwargs[] = "kwargs";
-static char __pyx_k_target[] = "target";
+static char __pyx_k_Protein[] = "Protein";
 static char __pyx_k_Score_d[] = "Score: %d";
 static char __pyx_k_isdigit[] = "isdigit";
 static char __pyx_k_protein[] = "protein";
 static char __pyx_k_Length_d[] = "Length: %d";
+static char __pyx_k_Sequence[] = "Sequence";
 static char __pyx_k_gap_type[] = "gap_type";
 static char __pyx_k_property[] = "property";
 static char __pyx_k_sequence[] = "sequence";
 static char __pyx_k_Alignment[] = "Alignment";
 static char __pyx_k_Exception[] = "Exception";
-static char __pyx_k_alignment[] = "alignment";
 static char __pyx_k_enumerate[] = "enumerate";
 static char __pyx_k_mask_auto[] = "mask_auto";
 static char __pyx_k_mid_table[] = "mid_table";
 static char __pyx_k_query_end[] = "query_end";
-static char __pyx_k_sequence1[] = "sequence1";
-static char __pyx_k_sequence2[] = "sequence2";
-static char __pyx_k_start_end[] = "start_end";
 static char __pyx_k_ValueError[] = "ValueError";
 static char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
 static char __pyx_k_score_only[] = "score_only";
@@ -1564,27 +1482,22 @@ static char __pyx_k_get_bit_flag[] = "_get_bit_flag";
 static char __pyx_k_score_filter[] = "score_filter";
 static char __pyx_k_target_begin[] = "target_begin";
 static char __pyx_k_is_zero_based[] = "is_zero_based";
-static char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
 static char __pyx_k_read_sequence[] = "read_sequence";
 static char __pyx_k_mismatch_score[] = "mismatch_score";
 static char __pyx_k_query_sequence[] = "query_sequence";
 static char __pyx_k_set_zero_based[] = "set_zero_based";
 static char __pyx_k_skbio_sequence[] = "skbio.sequence";
-static char __pyx_k_ProteinSequence[] = "ProteinSequence";
 static char __pyx_k_distance_filter[] = "distance_filter";
 static char __pyx_k_index_starts_at[] = "index_starts_at";
 static char __pyx_k_skbio_alignment[] = "skbio.alignment";
 static char __pyx_k_target_sequence[] = "target_sequence";
 static char __pyx_k_gap_open_penalty[] = "gap_open_penalty";
-static char __pyx_k_pyx_releasebuffer[] = "__pyx_releasebuffer";
 static char __pyx_k_tuples_from_cigar[] = "_tuples_from_cigar";
-static char __pyx_k_NucleotideSequence[] = "NucleotideSequence";
 static char __pyx_k_gap_extend_penalty[] = "gap_extend_penalty";
 static char __pyx_k_override_skip_babp[] = "override_skip_babp";
 static char __pyx_k_reference_sequence[] = "reference_sequence";
 static char __pyx_k_suppress_sequences[] = "suppress_sequences";
 static char __pyx_k_target_end_optimal[] = "target_end_optimal";
-static char __pyx_k_start_end_positions[] = "start_end_positions";
 static char __pyx_k_substitution_matrix[] = "substitution_matrix";
 static char __pyx_k_get_aligned_sequence[] = "_get_aligned_sequence";
 static char __pyx_k_target_end_suboptimal[] = "target_end_suboptimal";
@@ -1592,13 +1505,10 @@ static char __pyx_k_aligned_query_sequence[] = "aligned_query_sequence";
 static char __pyx_k_ARNDCQEGHILKMFPSTWYVBZX[] = "ARNDCQEGHILKMFPSTWYVBZX*";
 static char __pyx_k_aligned_target_sequence[] = "aligned_target_sequence";
 static char __pyx_k_optimal_alignment_score[] = "optimal_alignment_score";
-static char __pyx_k_local_pairwise_align_ssw[] = "local_pairwise_align_ssw";
 static char __pyx_k_gap_open_penalty_must_be_0[] = "`gap_open_penalty` must be > 0";
 static char __pyx_k_suboptimal_alignment_score[] = "suboptimal_alignment_score";
 static char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous";
 static char __pyx_k_gap_extend_penalty_must_be_0[] = "`gap_extend_penalty` must be > 0";
-static char __pyx_k_skbio_alignment__ssw_wrapper[] = "skbio.alignment._ssw_wrapper";
-static char __pyx_k_Users_jairideout_dev_scikit_bio[] = "/Users/jairideout/dev/scikit-bio/skbio/alignment/_ssw_wrapper.pyx";
 static char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)";
 static char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd";
 static char __pyx_k_Must_provide_a_substitution_matr[] = "Must provide a substitution matrix for protein sequences";
@@ -1619,11 +1529,10 @@ static PyObject *__pyx_n_s_M;
 static PyObject *__pyx_kp_s_Must_provide_a_substitution_matr;
 static PyObject *__pyx_n_s_N;
 static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor;
-static PyObject *__pyx_n_s_NucleotideSequence;
-static PyObject *__pyx_n_s_ProteinSequence;
+static PyObject *__pyx_n_s_Protein;
 static PyObject *__pyx_n_s_RuntimeError;
 static PyObject *__pyx_kp_s_Score_d;
-static PyObject *__pyx_kp_s_Users_jairideout_dev_scikit_bio;
+static PyObject *__pyx_n_s_Sequence;
 static PyObject *__pyx_n_s_ValueError;
 static PyObject *__pyx_kp_s__3;
 static PyObject *__pyx_kp_s__5;
@@ -1631,7 +1540,6 @@ static PyObject *__pyx_kp_s__6;
 static PyObject *__pyx_kp_s__8;
 static PyObject *__pyx_n_s_aligned_query_sequence;
 static PyObject *__pyx_n_s_aligned_target_sequence;
-static PyObject *__pyx_n_s_alignment;
 static PyObject *__pyx_n_s_array;
 static PyObject *__pyx_n_s_begin;
 static PyObject *__pyx_n_s_cigar;
@@ -1646,18 +1554,14 @@ static PyObject *__pyx_kp_s_gap_extend_penalty_must_be_0;
 static PyObject *__pyx_n_s_gap_open_penalty;
 static PyObject *__pyx_kp_s_gap_open_penalty_must_be_0;
 static PyObject *__pyx_n_s_gap_type;
-static PyObject *__pyx_n_s_get;
 static PyObject *__pyx_n_s_get_aligned_sequence;
 static PyObject *__pyx_n_s_get_bit_flag;
-static PyObject *__pyx_n_s_id;
 static PyObject *__pyx_n_s_import;
 static PyObject *__pyx_n_s_index_starts_at;
 static PyObject *__pyx_n_s_int8;
 static PyObject *__pyx_n_s_is_zero_based;
 static PyObject *__pyx_n_s_isdigit;
 static PyObject *__pyx_n_s_join;
-static PyObject *__pyx_n_s_kwargs;
-static PyObject *__pyx_n_s_local_pairwise_align_ssw;
 static PyObject *__pyx_n_s_main;
 static PyObject *__pyx_n_s_mask_auto;
 static PyObject *__pyx_n_s_mask_length;
@@ -1675,10 +1579,7 @@ static PyObject *__pyx_n_s_ord;
 static PyObject *__pyx_n_s_override_skip_babp;
 static PyObject *__pyx_n_s_property;
 static PyObject *__pyx_n_s_protein;
-static PyObject *__pyx_n_s_pyx_getbuffer;
-static PyObject *__pyx_n_s_pyx_releasebuffer;
 static PyObject *__pyx_n_s_pyx_vtable;
-static PyObject *__pyx_n_s_query;
 static PyObject *__pyx_n_s_query_begin;
 static PyObject *__pyx_n_s_query_end;
 static PyObject *__pyx_n_s_query_sequence;
@@ -1687,24 +1588,16 @@ static PyObject *__pyx_n_s_range;
 static PyObject *__pyx_n_s_read_sequence;
 static PyObject *__pyx_n_s_reference_sequence;
 static PyObject *__pyx_kp_s_s;
-static PyObject *__pyx_n_s_score;
 static PyObject *__pyx_n_s_score_filter;
 static PyObject *__pyx_n_s_score_only;
 static PyObject *__pyx_n_s_score_size;
-static PyObject *__pyx_n_s_seqs;
 static PyObject *__pyx_n_s_sequence;
-static PyObject *__pyx_n_s_sequence1;
-static PyObject *__pyx_n_s_sequence2;
 static PyObject *__pyx_n_s_set_zero_based;
 static PyObject *__pyx_n_s_skbio_alignment;
-static PyObject *__pyx_n_s_skbio_alignment__ssw_wrapper;
 static PyObject *__pyx_n_s_skbio_sequence;
-static PyObject *__pyx_n_s_start_end;
-static PyObject *__pyx_n_s_start_end_positions;
 static PyObject *__pyx_n_s_suboptimal_alignment_score;
 static PyObject *__pyx_n_s_substitution_matrix;
 static PyObject *__pyx_n_s_suppress_sequences;
-static PyObject *__pyx_n_s_target;
 static PyObject *__pyx_n_s_target_begin;
 static PyObject *__pyx_n_s_target_end_optimal;
 static PyObject *__pyx_n_s_target_end_suboptimal;
@@ -1752,10 +1645,8 @@ static PyObject *__pyx_tuple__14;
 static PyObject *__pyx_tuple__15;
 static PyObject *__pyx_tuple__16;
 static PyObject *__pyx_tuple__17;
-static PyObject *__pyx_tuple__18;
-static PyObject *__pyx_codeobj__19;
 
-/* "skbio/alignment/_ssw_wrapper.pyx":107
+/* "skbio/alignment/_ssw_wrapper.pyx":92
  *     cdef str _cigar_string
  * 
  *     def __cinit__(self, read_sequence, reference_sequence, index_starts_at):             # <<<<<<<<<<<<<<
@@ -1796,16 +1687,16 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_1__cin
         case  1:
         if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_reference_sequence)) != 0)) kw_args--;
         else {
-          __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+          __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
         }
         case  2:
         if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_index_starts_at)) != 0)) kw_args--;
         else {
-          __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+          __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
         }
       }
       if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
     } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
       goto __pyx_L5_argtuple_error;
@@ -1820,7 +1711,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_1__cin
   }
   goto __pyx_L4_argument_unpacking_done;
   __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
   __pyx_L3_error:;
   __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __Pyx_RefNannyFinishContext();
@@ -1843,14 +1734,14 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure___cini
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("__cinit__", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":111
+  /* "skbio/alignment/_ssw_wrapper.pyx":96
  *         # treated sematically as a private output of ssw.c like the `s_align`
  *         # struct
  *         self.read_sequence = read_sequence             # <<<<<<<<<<<<<<
  *         self.reference_sequence = reference_sequence
  *         self.index_starts_at = index_starts_at
  */
-  if (!(likely(PyString_CheckExact(__pyx_v_read_sequence))||((__pyx_v_read_sequence) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_v_read_sequence)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!(likely(PyString_CheckExact(__pyx_v_read_sequence))||((__pyx_v_read_sequence) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_v_read_sequence)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_t_1 = __pyx_v_read_sequence;
   __Pyx_INCREF(__pyx_t_1);
   __Pyx_GIVEREF(__pyx_t_1);
@@ -1859,14 +1750,14 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure___cini
   __pyx_v_self->read_sequence = ((PyObject*)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":112
+  /* "skbio/alignment/_ssw_wrapper.pyx":97
  *         # struct
  *         self.read_sequence = read_sequence
  *         self.reference_sequence = reference_sequence             # <<<<<<<<<<<<<<
  *         self.index_starts_at = index_starts_at
  * 
  */
-  if (!(likely(PyString_CheckExact(__pyx_v_reference_sequence))||((__pyx_v_reference_sequence) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_v_reference_sequence)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!(likely(PyString_CheckExact(__pyx_v_reference_sequence))||((__pyx_v_reference_sequence) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_v_reference_sequence)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 97; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_t_1 = __pyx_v_reference_sequence;
   __Pyx_INCREF(__pyx_t_1);
   __Pyx_GIVEREF(__pyx_t_1);
@@ -1875,17 +1766,17 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure___cini
   __pyx_v_self->reference_sequence = ((PyObject*)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":113
+  /* "skbio/alignment/_ssw_wrapper.pyx":98
  *         self.read_sequence = read_sequence
  *         self.reference_sequence = reference_sequence
  *         self.index_starts_at = index_starts_at             # <<<<<<<<<<<<<<
  * 
  *     cdef __constructor(self, s_align* pointer):
  */
-  __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_v_index_starts_at); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_v_index_starts_at); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_v_self->index_starts_at = __pyx_t_2;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":107
+  /* "skbio/alignment/_ssw_wrapper.pyx":92
  *     cdef str _cigar_string
  * 
  *     def __cinit__(self, read_sequence, reference_sequence, index_starts_at):             # <<<<<<<<<<<<<<
@@ -1905,7 +1796,7 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure___cini
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":115
+/* "skbio/alignment/_ssw_wrapper.pyx":100
  *         self.index_starts_at = index_starts_at
  * 
  *     cdef __constructor(self, s_align* pointer):             # <<<<<<<<<<<<<<
@@ -1918,7 +1809,7 @@ static PyObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure__
   __Pyx_RefNannyDeclarations
   __Pyx_RefNannySetupContext("__constructor", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":116
+  /* "skbio/alignment/_ssw_wrapper.pyx":101
  * 
  *     cdef __constructor(self, s_align* pointer):
  *         self.p = pointer             # <<<<<<<<<<<<<<
@@ -1927,7 +1818,7 @@ static PyObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure__
  */
   __pyx_v_self->p = __pyx_v_pointer;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":115
+  /* "skbio/alignment/_ssw_wrapper.pyx":100
  *         self.index_starts_at = index_starts_at
  * 
  *     cdef __constructor(self, s_align* pointer):             # <<<<<<<<<<<<<<
@@ -1942,7 +1833,7 @@ static PyObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure__
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":118
+/* "skbio/alignment/_ssw_wrapper.pyx":103
  *         self.p = pointer
  * 
  *     def __dealloc__(self):             # <<<<<<<<<<<<<<
@@ -1966,7 +1857,7 @@ static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_2__de
   int __pyx_t_1;
   __Pyx_RefNannySetupContext("__dealloc__", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":119
+  /* "skbio/alignment/_ssw_wrapper.pyx":104
  * 
  *     def __dealloc__(self):
  *         if self.p is not NULL:             # <<<<<<<<<<<<<<
@@ -1976,7 +1867,7 @@ static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_2__de
   __pyx_t_1 = ((__pyx_v_self->p != NULL) != 0);
   if (__pyx_t_1) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":120
+    /* "skbio/alignment/_ssw_wrapper.pyx":105
  *     def __dealloc__(self):
  *         if self.p is not NULL:
  *             align_destroy(self.p)             # <<<<<<<<<<<<<<
@@ -1988,7 +1879,7 @@ static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_2__de
   }
   __pyx_L3:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":118
+  /* "skbio/alignment/_ssw_wrapper.pyx":103
  *         self.p = pointer
  * 
  *     def __dealloc__(self):             # <<<<<<<<<<<<<<
@@ -2000,7 +1891,7 @@ static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_2__de
   __Pyx_RefNannyFinishContext();
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":122
+/* "skbio/alignment/_ssw_wrapper.pyx":107
  *             align_destroy(self.p)
  * 
  *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
@@ -2030,7 +1921,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("__getitem__", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":123
+  /* "skbio/alignment/_ssw_wrapper.pyx":108
  * 
  *     def __getitem__(self, key):
  *         return getattr(self, key)             # <<<<<<<<<<<<<<
@@ -2038,13 +1929,13 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *     def __repr__(self):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_GetAttr(((PyObject *)__pyx_v_self), __pyx_v_key); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetAttr(((PyObject *)__pyx_v_self), __pyx_v_key); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":122
+  /* "skbio/alignment/_ssw_wrapper.pyx":107
  *             align_destroy(self.p)
  * 
  *     def __getitem__(self, key):             # <<<<<<<<<<<<<<
@@ -2063,7 +1954,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":125
+/* "skbio/alignment/_ssw_wrapper.pyx":110
  *         return getattr(self, key)
  * 
  *     def __repr__(self):             # <<<<<<<<<<<<<<
@@ -2095,54 +1986,57 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   PyObject *__pyx_t_4 = NULL;
   PyObject *__pyx_t_5 = NULL;
   PyObject *__pyx_t_6 = NULL;
+  PyObject *__pyx_t_7 = NULL;
+  Py_ssize_t __pyx_t_8;
+  PyObject *__pyx_t_9 = NULL;
   int __pyx_lineno = 0;
   const char *__pyx_filename = NULL;
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("__repr__", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":126
+  /* "skbio/alignment/_ssw_wrapper.pyx":111
  * 
  *     def __repr__(self):
  *         data = ['optimal_alignment_score', 'suboptimal_alignment_score',             # <<<<<<<<<<<<<<
  *                 'query_begin', 'query_end', 'target_begin',
  *                 'target_end_optimal', 'target_end_suboptimal', 'cigar',
  */
-  __pyx_t_1 = PyList_New(10); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyList_New(10); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_INCREF(__pyx_n_s_optimal_alignment_score);
-  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_optimal_alignment_score);
   __Pyx_GIVEREF(__pyx_n_s_optimal_alignment_score);
+  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_optimal_alignment_score);
   __Pyx_INCREF(__pyx_n_s_suboptimal_alignment_score);
-  PyList_SET_ITEM(__pyx_t_1, 1, __pyx_n_s_suboptimal_alignment_score);
   __Pyx_GIVEREF(__pyx_n_s_suboptimal_alignment_score);
+  PyList_SET_ITEM(__pyx_t_1, 1, __pyx_n_s_suboptimal_alignment_score);
   __Pyx_INCREF(__pyx_n_s_query_begin);
-  PyList_SET_ITEM(__pyx_t_1, 2, __pyx_n_s_query_begin);
   __Pyx_GIVEREF(__pyx_n_s_query_begin);
+  PyList_SET_ITEM(__pyx_t_1, 2, __pyx_n_s_query_begin);
   __Pyx_INCREF(__pyx_n_s_query_end);
-  PyList_SET_ITEM(__pyx_t_1, 3, __pyx_n_s_query_end);
   __Pyx_GIVEREF(__pyx_n_s_query_end);
+  PyList_SET_ITEM(__pyx_t_1, 3, __pyx_n_s_query_end);
   __Pyx_INCREF(__pyx_n_s_target_begin);
-  PyList_SET_ITEM(__pyx_t_1, 4, __pyx_n_s_target_begin);
   __Pyx_GIVEREF(__pyx_n_s_target_begin);
+  PyList_SET_ITEM(__pyx_t_1, 4, __pyx_n_s_target_begin);
   __Pyx_INCREF(__pyx_n_s_target_end_optimal);
-  PyList_SET_ITEM(__pyx_t_1, 5, __pyx_n_s_target_end_optimal);
   __Pyx_GIVEREF(__pyx_n_s_target_end_optimal);
+  PyList_SET_ITEM(__pyx_t_1, 5, __pyx_n_s_target_end_optimal);
   __Pyx_INCREF(__pyx_n_s_target_end_suboptimal);
-  PyList_SET_ITEM(__pyx_t_1, 6, __pyx_n_s_target_end_suboptimal);
   __Pyx_GIVEREF(__pyx_n_s_target_end_suboptimal);
+  PyList_SET_ITEM(__pyx_t_1, 6, __pyx_n_s_target_end_suboptimal);
   __Pyx_INCREF(__pyx_n_s_cigar);
-  PyList_SET_ITEM(__pyx_t_1, 7, __pyx_n_s_cigar);
   __Pyx_GIVEREF(__pyx_n_s_cigar);
+  PyList_SET_ITEM(__pyx_t_1, 7, __pyx_n_s_cigar);
   __Pyx_INCREF(__pyx_n_s_query_sequence);
-  PyList_SET_ITEM(__pyx_t_1, 8, __pyx_n_s_query_sequence);
   __Pyx_GIVEREF(__pyx_n_s_query_sequence);
+  PyList_SET_ITEM(__pyx_t_1, 8, __pyx_n_s_query_sequence);
   __Pyx_INCREF(__pyx_n_s_target_sequence);
-  PyList_SET_ITEM(__pyx_t_1, 9, __pyx_n_s_target_sequence);
   __Pyx_GIVEREF(__pyx_n_s_target_sequence);
+  PyList_SET_ITEM(__pyx_t_1, 9, __pyx_n_s_target_sequence);
   __pyx_v_data = ((PyObject*)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":130
+  /* "skbio/alignment/_ssw_wrapper.pyx":115
  *                 'target_end_optimal', 'target_end_suboptimal', 'cigar',
  *                 'query_sequence', 'target_sequence']
  *         return "{\n%s\n}" % ',\n'.join([             # <<<<<<<<<<<<<<
@@ -2150,10 +2044,10 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  * 
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":131
+  /* "skbio/alignment/_ssw_wrapper.pyx":116
  *                 'query_sequence', 'target_sequence']
  *         return "{\n%s\n}" % ',\n'.join([
  *             "    {!r}: {!r}".format(k, self[k]) for k in data])             # <<<<<<<<<<<<<<
@@ -2164,51 +2058,67 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   for (;;) {
     if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
     #if CYTHON_COMPILING_IN_CPYTHON
-    __pyx_t_4 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_4); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_4); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     #else
-    __pyx_t_4 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
     #endif
     __Pyx_XDECREF_SET(__pyx_v_k, __pyx_t_4);
     __pyx_t_4 = 0;
-    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_kp_s_r_r, __pyx_n_s_format); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_k); if (unlikely(__pyx_t_5 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_kp_s_r_r, __pyx_n_s_format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_5);
-    __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_6 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_k); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
     __Pyx_GOTREF(__pyx_t_6);
+    __pyx_t_7 = NULL;
+    __pyx_t_8 = 0;
+    if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_5))) {
+      __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
+      if (likely(__pyx_t_7)) {
+        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
+        __Pyx_INCREF(__pyx_t_7);
+        __Pyx_INCREF(function);
+        __Pyx_DECREF_SET(__pyx_t_5, function);
+        __pyx_t_8 = 1;
+      }
+    }
+    __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_9);
+    if (__pyx_t_7) {
+      __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
+    }
     __Pyx_INCREF(__pyx_v_k);
-    PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_k);
     __Pyx_GIVEREF(__pyx_v_k);
-    PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_5);
-    __Pyx_GIVEREF(__pyx_t_5);
-    __pyx_t_5 = 0;
-    __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_5);
-    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-    if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_v_k);
+    __Pyx_GIVEREF(__pyx_t_6);
+    PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_t_6);
+    __pyx_t_6 = 0;
+    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
     __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+    if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_4))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
   }
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":130
+  /* "skbio/alignment/_ssw_wrapper.pyx":115
  *                 'target_end_optimal', 'target_end_suboptimal', 'cigar',
  *                 'query_sequence', 'target_sequence']
  *         return "{\n%s\n}" % ',\n'.join([             # <<<<<<<<<<<<<<
  *             "    {!r}: {!r}".format(k, self[k]) for k in data])
  * 
  */
-  __pyx_t_2 = __Pyx_PyString_Join(__pyx_kp_s_, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyString_Join(__pyx_kp_s_, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_s, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_s, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":125
+  /* "skbio/alignment/_ssw_wrapper.pyx":110
  *         return getattr(self, key)
  * 
  *     def __repr__(self):             # <<<<<<<<<<<<<<
@@ -2223,6 +2133,8 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __Pyx_XDECREF(__pyx_t_4);
   __Pyx_XDECREF(__pyx_t_5);
   __Pyx_XDECREF(__pyx_t_6);
+  __Pyx_XDECREF(__pyx_t_7);
+  __Pyx_XDECREF(__pyx_t_9);
   __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __pyx_r = NULL;
   __pyx_L0:;
@@ -2233,7 +2145,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":133
+/* "skbio/alignment/_ssw_wrapper.pyx":118
  *             "    {!r}: {!r}".format(k, self[k]) for k in data])
  * 
  *     def __str__(self):             # <<<<<<<<<<<<<<
@@ -2266,145 +2178,146 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   PyObject *__pyx_t_2 = NULL;
   int __pyx_t_3;
   int __pyx_t_4;
-  int __pyx_t_5;
-  Py_ssize_t __pyx_t_6;
+  Py_ssize_t __pyx_t_5;
   int __pyx_lineno = 0;
   const char *__pyx_filename = NULL;
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("__str__", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":134
+  /* "skbio/alignment/_ssw_wrapper.pyx":119
  * 
  *     def __str__(self):
  *         score = "Score: %d" % self.optimal_alignment_score             # <<<<<<<<<<<<<<
  *         if self.query_sequence and self.cigar:
  *             target = self.aligned_target_sequence
  */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_optimal_alignment_score); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_optimal_alignment_score); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_Score_d, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_Score_d, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_score = ((PyObject*)__pyx_t_2);
   __pyx_t_2 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":135
+  /* "skbio/alignment/_ssw_wrapper.pyx":120
  *     def __str__(self):
  *         score = "Score: %d" % self.optimal_alignment_score
  *         if self.query_sequence and self.cigar:             # <<<<<<<<<<<<<<
  *             target = self.aligned_target_sequence
  *             query = self.aligned_query_sequence
  */
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  if (__pyx_t_3) {
-    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_cigar); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_2);
-    __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-    __pyx_t_5 = __pyx_t_4;
+  if (__pyx_t_4) {
   } else {
-    __pyx_t_5 = __pyx_t_3;
+    __pyx_t_3 = __pyx_t_4;
+    goto __pyx_L4_bool_binop_done;
   }
-  if (__pyx_t_5) {
+  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_cigar); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __pyx_t_3 = __pyx_t_4;
+  __pyx_L4_bool_binop_done:;
+  if (__pyx_t_3) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":136
+    /* "skbio/alignment/_ssw_wrapper.pyx":121
  *         score = "Score: %d" % self.optimal_alignment_score
  *         if self.query_sequence and self.cigar:
  *             target = self.aligned_target_sequence             # <<<<<<<<<<<<<<
  *             query = self.aligned_query_sequence
  *             align_len = len(query)
  */
-    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_aligned_target_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_aligned_target_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
     __pyx_v_target = __pyx_t_2;
     __pyx_t_2 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":137
+    /* "skbio/alignment/_ssw_wrapper.pyx":122
  *         if self.query_sequence and self.cigar:
  *             target = self.aligned_target_sequence
  *             query = self.aligned_query_sequence             # <<<<<<<<<<<<<<
  *             align_len = len(query)
  *             if align_len > 13:
  */
-    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_aligned_query_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_aligned_query_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
     __pyx_v_query = __pyx_t_2;
     __pyx_t_2 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":138
+    /* "skbio/alignment/_ssw_wrapper.pyx":123
  *             target = self.aligned_target_sequence
  *             query = self.aligned_query_sequence
  *             align_len = len(query)             # <<<<<<<<<<<<<<
  *             if align_len > 13:
  *                 target = target[:10] + "..."
  */
-    __pyx_t_6 = PyObject_Length(__pyx_v_query); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __pyx_t_2 = PyInt_FromSsize_t(__pyx_t_6); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_5 = PyObject_Length(__pyx_v_query); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = PyInt_FromSsize_t(__pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
     __pyx_v_align_len = __pyx_t_2;
     __pyx_t_2 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":139
+    /* "skbio/alignment/_ssw_wrapper.pyx":124
  *             query = self.aligned_query_sequence
  *             align_len = len(query)
  *             if align_len > 13:             # <<<<<<<<<<<<<<
  *                 target = target[:10] + "..."
  *                 query = query[:10] + "..."
  */
-    __pyx_t_2 = PyObject_RichCompare(__pyx_v_align_len, __pyx_int_13, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = PyObject_RichCompare(__pyx_v_align_len, __pyx_int_13, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-    if (__pyx_t_5) {
+    if (__pyx_t_3) {
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":140
+      /* "skbio/alignment/_ssw_wrapper.pyx":125
  *             align_len = len(query)
  *             if align_len > 13:
  *                 target = target[:10] + "..."             # <<<<<<<<<<<<<<
  *                 query = query[:10] + "..."
  * 
  */
-      __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_target, 0, 10, NULL, NULL, &__pyx_slice__2, 0, 1, 1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_target, 0, 10, NULL, NULL, &__pyx_slice__2, 0, 1, 1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_2);
-      __pyx_t_1 = PyNumber_Add(__pyx_t_2, __pyx_kp_s__3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_1 = PyNumber_Add(__pyx_t_2, __pyx_kp_s__3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_1);
       __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
       __Pyx_DECREF_SET(__pyx_v_target, __pyx_t_1);
       __pyx_t_1 = 0;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":141
+      /* "skbio/alignment/_ssw_wrapper.pyx":126
  *             if align_len > 13:
  *                 target = target[:10] + "..."
  *                 query = query[:10] + "..."             # <<<<<<<<<<<<<<
  * 
  *             length = "Length: %d" % align_len
  */
-      __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_query, 0, 10, NULL, NULL, &__pyx_slice__4, 0, 1, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_query, 0, 10, NULL, NULL, &__pyx_slice__4, 0, 1, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_1);
-      __pyx_t_2 = PyNumber_Add(__pyx_t_1, __pyx_kp_s__3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_2 = PyNumber_Add(__pyx_t_1, __pyx_kp_s__3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_2);
       __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
       __Pyx_DECREF_SET(__pyx_v_query, __pyx_t_2);
       __pyx_t_2 = 0;
-      goto __pyx_L4;
+      goto __pyx_L6;
     }
-    __pyx_L4:;
+    __pyx_L6:;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":143
+    /* "skbio/alignment/_ssw_wrapper.pyx":128
  *                 query = query[:10] + "..."
  * 
  *             length = "Length: %d" % align_len             # <<<<<<<<<<<<<<
  *             return "\n".join([query, target, score, length])
  *         return score
  */
-    __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_Length_d, __pyx_v_align_len); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_Length_d, __pyx_v_align_len); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
     __pyx_v_length = ((PyObject*)__pyx_t_2);
     __pyx_t_2 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":144
+    /* "skbio/alignment/_ssw_wrapper.pyx":129
  * 
  *             length = "Length: %d" % align_len
  *             return "\n".join([query, target, score, length])             # <<<<<<<<<<<<<<
@@ -2412,21 +2325,21 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  * 
  */
     __Pyx_XDECREF(__pyx_r);
-    __pyx_t_2 = PyList_New(4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = PyList_New(4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
     __Pyx_INCREF(__pyx_v_query);
-    PyList_SET_ITEM(__pyx_t_2, 0, __pyx_v_query);
     __Pyx_GIVEREF(__pyx_v_query);
+    PyList_SET_ITEM(__pyx_t_2, 0, __pyx_v_query);
     __Pyx_INCREF(__pyx_v_target);
-    PyList_SET_ITEM(__pyx_t_2, 1, __pyx_v_target);
     __Pyx_GIVEREF(__pyx_v_target);
+    PyList_SET_ITEM(__pyx_t_2, 1, __pyx_v_target);
     __Pyx_INCREF(__pyx_v_score);
-    PyList_SET_ITEM(__pyx_t_2, 2, __pyx_v_score);
     __Pyx_GIVEREF(__pyx_v_score);
+    PyList_SET_ITEM(__pyx_t_2, 2, __pyx_v_score);
     __Pyx_INCREF(__pyx_v_length);
-    PyList_SET_ITEM(__pyx_t_2, 3, __pyx_v_length);
     __Pyx_GIVEREF(__pyx_v_length);
-    __pyx_t_1 = __Pyx_PyString_Join(__pyx_kp_s__5, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    PyList_SET_ITEM(__pyx_t_2, 3, __pyx_v_length);
+    __pyx_t_1 = __Pyx_PyString_Join(__pyx_kp_s__5, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
     __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
     __pyx_r = __pyx_t_1;
@@ -2434,7 +2347,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
     goto __pyx_L0;
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":145
+  /* "skbio/alignment/_ssw_wrapper.pyx":130
  *             length = "Length: %d" % align_len
  *             return "\n".join([query, target, score, length])
  *         return score             # <<<<<<<<<<<<<<
@@ -2446,7 +2359,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_r = __pyx_v_score;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":133
+  /* "skbio/alignment/_ssw_wrapper.pyx":118
  *             "    {!r}: {!r}".format(k, self[k]) for k in data])
  * 
  *     def __str__(self):             # <<<<<<<<<<<<<<
@@ -2471,7 +2384,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":148
+/* "skbio/alignment/_ssw_wrapper.pyx":133
  * 
  *     @property
  *     def optimal_alignment_score(self):             # <<<<<<<<<<<<<<
@@ -2502,7 +2415,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("optimal_alignment_score", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":157
+  /* "skbio/alignment/_ssw_wrapper.pyx":142
  * 
  *         """
  *         return self.p.score1             # <<<<<<<<<<<<<<
@@ -2510,13 +2423,13 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *     @property
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyInt_From_npy_uint16(__pyx_v_self->p->score1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyInt_From_npy_uint16(__pyx_v_self->p->score1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":148
+  /* "skbio/alignment/_ssw_wrapper.pyx":133
  * 
  *     @property
  *     def optimal_alignment_score(self):             # <<<<<<<<<<<<<<
@@ -2535,7 +2448,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":160
+/* "skbio/alignment/_ssw_wrapper.pyx":145
  * 
  *     @property
  *     def suboptimal_alignment_score(self):             # <<<<<<<<<<<<<<
@@ -2566,7 +2479,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("suboptimal_alignment_score", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":169
+  /* "skbio/alignment/_ssw_wrapper.pyx":154
  * 
  *         """
  *         return self.p.score2             # <<<<<<<<<<<<<<
@@ -2574,13 +2487,13 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *     @property
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyInt_From_npy_uint16(__pyx_v_self->p->score2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyInt_From_npy_uint16(__pyx_v_self->p->score2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":160
+  /* "skbio/alignment/_ssw_wrapper.pyx":145
  * 
  *     @property
  *     def suboptimal_alignment_score(self):             # <<<<<<<<<<<<<<
@@ -2599,7 +2512,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":172
+/* "skbio/alignment/_ssw_wrapper.pyx":157
  * 
  *     @property
  *     def target_begin(self):             # <<<<<<<<<<<<<<
@@ -2631,7 +2544,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("target_begin", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":185
+  /* "skbio/alignment/_ssw_wrapper.pyx":170
  * 
  *         """
  *         return self.p.ref_begin1 + self.index_starts_at if (self.p.ref_begin1             # <<<<<<<<<<<<<<
@@ -2640,7 +2553,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  */
   __Pyx_XDECREF(__pyx_r);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":186
+  /* "skbio/alignment/_ssw_wrapper.pyx":171
  *         """
  *         return self.p.ref_begin1 + self.index_starts_at if (self.p.ref_begin1
  *                                                             >= 0) else -1             # <<<<<<<<<<<<<<
@@ -2649,14 +2562,14 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  */
   if (((__pyx_v_self->p->ref_begin1 >= 0) != 0)) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":185
+    /* "skbio/alignment/_ssw_wrapper.pyx":170
  * 
  *         """
  *         return self.p.ref_begin1 + self.index_starts_at if (self.p.ref_begin1             # <<<<<<<<<<<<<<
  *                                                             >= 0) else -1
  * 
  */
-    __pyx_t_2 = __Pyx_PyInt_From_int((__pyx_v_self->p->ref_begin1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = __Pyx_PyInt_From_int((__pyx_v_self->p->ref_begin1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
     __pyx_t_1 = __pyx_t_2;
     __pyx_t_2 = 0;
@@ -2668,7 +2581,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":172
+  /* "skbio/alignment/_ssw_wrapper.pyx":157
  * 
  *     @property
  *     def target_begin(self):             # <<<<<<<<<<<<<<
@@ -2688,7 +2601,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":189
+/* "skbio/alignment/_ssw_wrapper.pyx":174
  * 
  *     @property
  *     def target_end_optimal(self):             # <<<<<<<<<<<<<<
@@ -2719,7 +2632,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("target_end_optimal", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":203
+  /* "skbio/alignment/_ssw_wrapper.pyx":188
  * 
  *         """
  *         return self.p.ref_end1 + self.index_starts_at             # <<<<<<<<<<<<<<
@@ -2727,13 +2640,13 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *     @property
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyInt_From_int((__pyx_v_self->p->ref_end1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyInt_From_int((__pyx_v_self->p->ref_end1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":189
+  /* "skbio/alignment/_ssw_wrapper.pyx":174
  * 
  *     @property
  *     def target_end_optimal(self):             # <<<<<<<<<<<<<<
@@ -2752,7 +2665,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":206
+/* "skbio/alignment/_ssw_wrapper.pyx":191
  * 
  *     @property
  *     def target_end_suboptimal(self):             # <<<<<<<<<<<<<<
@@ -2783,7 +2696,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("target_end_suboptimal", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":220
+  /* "skbio/alignment/_ssw_wrapper.pyx":205
  * 
  *         """
  *         return self.p.ref_end2 + self.index_starts_at             # <<<<<<<<<<<<<<
@@ -2791,13 +2704,13 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *     @property
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyInt_From_int((__pyx_v_self->p->ref_end2 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyInt_From_int((__pyx_v_self->p->ref_end2 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":206
+  /* "skbio/alignment/_ssw_wrapper.pyx":191
  * 
  *     @property
  *     def target_end_suboptimal(self):             # <<<<<<<<<<<<<<
@@ -2816,7 +2729,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":223
+/* "skbio/alignment/_ssw_wrapper.pyx":208
  * 
  *     @property
  *     def query_begin(self):             # <<<<<<<<<<<<<<
@@ -2848,7 +2761,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("query_begin", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":236
+  /* "skbio/alignment/_ssw_wrapper.pyx":221
  * 
  *         """
  *         return self.p.read_begin1 + self.index_starts_at if (self.p.read_begin1             # <<<<<<<<<<<<<<
@@ -2857,7 +2770,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  */
   __Pyx_XDECREF(__pyx_r);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":237
+  /* "skbio/alignment/_ssw_wrapper.pyx":222
  *         """
  *         return self.p.read_begin1 + self.index_starts_at if (self.p.read_begin1
  *                                                              >= 0) else -1             # <<<<<<<<<<<<<<
@@ -2866,14 +2779,14 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  */
   if (((__pyx_v_self->p->read_begin1 >= 0) != 0)) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":236
+    /* "skbio/alignment/_ssw_wrapper.pyx":221
  * 
  *         """
  *         return self.p.read_begin1 + self.index_starts_at if (self.p.read_begin1             # <<<<<<<<<<<<<<
  *                                                              >= 0) else -1
  * 
  */
-    __pyx_t_2 = __Pyx_PyInt_From_int((__pyx_v_self->p->read_begin1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = __Pyx_PyInt_From_int((__pyx_v_self->p->read_begin1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 221; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
     __pyx_t_1 = __pyx_t_2;
     __pyx_t_2 = 0;
@@ -2885,7 +2798,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":223
+  /* "skbio/alignment/_ssw_wrapper.pyx":208
  * 
  *     @property
  *     def query_begin(self):             # <<<<<<<<<<<<<<
@@ -2905,7 +2818,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":240
+/* "skbio/alignment/_ssw_wrapper.pyx":225
  * 
  *     @property
  *     def query_end(self):             # <<<<<<<<<<<<<<
@@ -2936,7 +2849,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("query_end", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":253
+  /* "skbio/alignment/_ssw_wrapper.pyx":238
  * 
  *         """
  *         return self.p.read_end1 + self.index_starts_at             # <<<<<<<<<<<<<<
@@ -2944,13 +2857,13 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *     @property
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyInt_From_int((__pyx_v_self->p->read_end1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyInt_From_int((__pyx_v_self->p->read_end1 + __pyx_v_self->index_starts_at)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 238; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":240
+  /* "skbio/alignment/_ssw_wrapper.pyx":225
  * 
  *     @property
  *     def query_end(self):             # <<<<<<<<<<<<<<
@@ -2969,7 +2882,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":256
+/* "skbio/alignment/_ssw_wrapper.pyx":241
  * 
  *     @property
  *     def cigar(self):             # <<<<<<<<<<<<<<
@@ -3009,7 +2922,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("cigar", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":278
+  /* "skbio/alignment/_ssw_wrapper.pyx":263
  *         """
  *         # Memoization! (1/2)
  *         if self._cigar_string is not None:             # <<<<<<<<<<<<<<
@@ -3020,7 +2933,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_t_2 = (__pyx_t_1 != 0);
   if (__pyx_t_2) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":279
+    /* "skbio/alignment/_ssw_wrapper.pyx":264
  *         # Memoization! (1/2)
  *         if self._cigar_string is not None:
  *             return self._cigar_string             # <<<<<<<<<<<<<<
@@ -3033,19 +2946,19 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
     goto __pyx_L0;
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":280
+  /* "skbio/alignment/_ssw_wrapper.pyx":265
  *         if self._cigar_string is not None:
  *             return self._cigar_string
  *         cigar_list = []             # <<<<<<<<<<<<<<
  *         for i in range(self.p.cigarLen):
  *             # stored the same as that in BAM format,
  */
-  __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 280; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 265; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
   __pyx_v_cigar_list = ((PyObject*)__pyx_t_3);
   __pyx_t_3 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":281
+  /* "skbio/alignment/_ssw_wrapper.pyx":266
  *             return self._cigar_string
  *         cigar_list = []
  *         for i in range(self.p.cigarLen):             # <<<<<<<<<<<<<<
@@ -3056,60 +2969,60 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
     __pyx_v_i = __pyx_t_5;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":286
+    /* "skbio/alignment/_ssw_wrapper.pyx":271
  * 
  *             # Length, remove first 4 bits
  *             cigar_list.append(str(self.p.cigar[i] >> 4))             # <<<<<<<<<<<<<<
  *             # M/I/D, lookup first 4 bits in the mid_table
  *             cigar_list.append(mid_table[self.p.cigar[i] & 0xf])
  */
-    __pyx_t_3 = __Pyx_PyInt_From_long(((__pyx_v_self->p->cigar[__pyx_v_i]) >> 4)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_PyInt_From_long(((__pyx_v_self->p->cigar[__pyx_v_i]) >> 4)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
-    __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_6);
-    PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3);
     __Pyx_GIVEREF(__pyx_t_3);
+    PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3);
     __pyx_t_3 = 0;
-    __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
     __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-    __pyx_t_7 = __Pyx_PyList_Append(__pyx_v_cigar_list, __pyx_t_3); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_7 = __Pyx_PyList_Append(__pyx_v_cigar_list, __pyx_t_3); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":288
+    /* "skbio/alignment/_ssw_wrapper.pyx":273
  *             cigar_list.append(str(self.p.cigar[i] >> 4))
  *             # M/I/D, lookup first 4 bits in the mid_table
  *             cigar_list.append(mid_table[self.p.cigar[i] & 0xf])             # <<<<<<<<<<<<<<
  *         # Memoization! (2/2)
  *         self._cigar_string = "".join(cigar_list)
  */
-    __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_mid_table); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_mid_table); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 273; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
     __pyx_t_8 = ((__pyx_v_self->p->cigar[__pyx_v_i]) & 0xf);
-    __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_3, __pyx_t_8, long, 1, __Pyx_PyInt_From_long, 0, 1, 1); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_3, __pyx_t_8, long, 1, __Pyx_PyInt_From_long, 0, 1, 1); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 273; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
     __Pyx_GOTREF(__pyx_t_6);
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-    __pyx_t_7 = __Pyx_PyList_Append(__pyx_v_cigar_list, __pyx_t_6); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_7 = __Pyx_PyList_Append(__pyx_v_cigar_list, __pyx_t_6); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 273; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":290
+  /* "skbio/alignment/_ssw_wrapper.pyx":275
  *             cigar_list.append(mid_table[self.p.cigar[i] & 0xf])
  *         # Memoization! (2/2)
  *         self._cigar_string = "".join(cigar_list)             # <<<<<<<<<<<<<<
  *         return self._cigar_string
  * 
  */
-  __pyx_t_6 = __Pyx_PyString_Join(__pyx_kp_s__6, __pyx_v_cigar_list); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_6 = __Pyx_PyString_Join(__pyx_kp_s__6, __pyx_v_cigar_list); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 275; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_6);
-  if (!(likely(PyString_CheckExact(__pyx_t_6))||((__pyx_t_6) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_t_6)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!(likely(PyString_CheckExact(__pyx_t_6))||((__pyx_t_6) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_t_6)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 275; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GIVEREF(__pyx_t_6);
   __Pyx_GOTREF(__pyx_v_self->_cigar_string);
   __Pyx_DECREF(__pyx_v_self->_cigar_string);
   __pyx_v_self->_cigar_string = ((PyObject*)__pyx_t_6);
   __pyx_t_6 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":291
+  /* "skbio/alignment/_ssw_wrapper.pyx":276
  *         # Memoization! (2/2)
  *         self._cigar_string = "".join(cigar_list)
  *         return self._cigar_string             # <<<<<<<<<<<<<<
@@ -3121,7 +3034,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_r = __pyx_v_self->_cigar_string;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":256
+  /* "skbio/alignment/_ssw_wrapper.pyx":241
  * 
  *     @property
  *     def cigar(self):             # <<<<<<<<<<<<<<
@@ -3142,7 +3055,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":294
+/* "skbio/alignment/_ssw_wrapper.pyx":279
  * 
  *     @property
  *     def query_sequence(self):             # <<<<<<<<<<<<<<
@@ -3169,7 +3082,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __Pyx_RefNannyDeclarations
   __Pyx_RefNannySetupContext("query_sequence", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":303
+  /* "skbio/alignment/_ssw_wrapper.pyx":288
  * 
  *         """
  *         return self.read_sequence             # <<<<<<<<<<<<<<
@@ -3181,7 +3094,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_r = __pyx_v_self->read_sequence;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":294
+  /* "skbio/alignment/_ssw_wrapper.pyx":279
  * 
  *     @property
  *     def query_sequence(self):             # <<<<<<<<<<<<<<
@@ -3196,7 +3109,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":306
+/* "skbio/alignment/_ssw_wrapper.pyx":291
  * 
  *     @property
  *     def target_sequence(self):             # <<<<<<<<<<<<<<
@@ -3223,7 +3136,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __Pyx_RefNannyDeclarations
   __Pyx_RefNannySetupContext("target_sequence", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":315
+  /* "skbio/alignment/_ssw_wrapper.pyx":300
  * 
  *         """
  *         return self.reference_sequence             # <<<<<<<<<<<<<<
@@ -3235,7 +3148,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_r = __pyx_v_self->reference_sequence;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":306
+  /* "skbio/alignment/_ssw_wrapper.pyx":291
  * 
  *     @property
  *     def target_sequence(self):             # <<<<<<<<<<<<<<
@@ -3250,7 +3163,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":318
+/* "skbio/alignment/_ssw_wrapper.pyx":303
  * 
  *     @property
  *     def aligned_query_sequence(self):             # <<<<<<<<<<<<<<
@@ -3282,25 +3195,28 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   PyObject *__pyx_t_5 = NULL;
   PyObject *__pyx_t_6 = NULL;
   PyObject *__pyx_t_7 = NULL;
+  PyObject *__pyx_t_8 = NULL;
+  Py_ssize_t __pyx_t_9;
+  PyObject *__pyx_t_10 = NULL;
   int __pyx_lineno = 0;
   const char *__pyx_filename = NULL;
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("aligned_query_sequence", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":332
+  /* "skbio/alignment/_ssw_wrapper.pyx":317
  * 
  *         """
  *         if self.query_sequence:             # <<<<<<<<<<<<<<
  *             return self._get_aligned_sequence(self.query_sequence,
  *                                               self._tuples_from_cigar(),
  */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 332; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 332; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   if (__pyx_t_2) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":333
+    /* "skbio/alignment/_ssw_wrapper.pyx":318
  *         """
  *         if self.query_sequence:
  *             return self._get_aligned_sequence(self.query_sequence,             # <<<<<<<<<<<<<<
@@ -3308,70 +3224,92 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *                                               self.query_begin, self.query_end,
  */
     __Pyx_XDECREF(__pyx_r);
-    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_get_aligned_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 333; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_1);
-    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 333; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_get_aligned_sequence); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
+    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":334
+    /* "skbio/alignment/_ssw_wrapper.pyx":319
  *         if self.query_sequence:
  *             return self._get_aligned_sequence(self.query_sequence,
  *                                               self._tuples_from_cigar(),             # <<<<<<<<<<<<<<
  *                                               self.query_begin, self.query_end,
  *                                               "D")
  */
-    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_tuples_from_cigar); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_empty_tuple, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_tuples_from_cigar); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_6);
+    __pyx_t_7 = NULL;
+    if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_6))) {
+      __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6);
+      if (likely(__pyx_t_7)) {
+        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
+        __Pyx_INCREF(__pyx_t_7);
+        __Pyx_INCREF(function);
+        __Pyx_DECREF_SET(__pyx_t_6, function);
+      }
+    }
+    if (__pyx_t_7) {
+      __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_7); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+    } else {
+      __pyx_t_5 = __Pyx_PyObject_CallNoArg(__pyx_t_6); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
     __Pyx_GOTREF(__pyx_t_5);
-    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":335
+    /* "skbio/alignment/_ssw_wrapper.pyx":320
  *             return self._get_aligned_sequence(self.query_sequence,
  *                                               self._tuples_from_cigar(),
  *                                               self.query_begin, self.query_end,             # <<<<<<<<<<<<<<
  *                                               "D")
  *         return None
  */
-    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_begin); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 335; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_end); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 335; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_begin); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 320; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_6);
-
-    /* "skbio/alignment/_ssw_wrapper.pyx":333
- *         """
- *         if self.query_sequence:
- *             return self._get_aligned_sequence(self.query_sequence,             # <<<<<<<<<<<<<<
- *                                               self._tuples_from_cigar(),
- *                                               self.query_begin, self.query_end,
- */
-    __pyx_t_7 = PyTuple_New(5); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 333; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_query_end); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 320; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_7);
-    PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3);
-    __Pyx_GIVEREF(__pyx_t_3);
-    PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_5);
-    __Pyx_GIVEREF(__pyx_t_5);
-    PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_t_4);
+    __pyx_t_8 = NULL;
+    __pyx_t_9 = 0;
+    if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_3))) {
+      __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_3);
+      if (likely(__pyx_t_8)) {
+        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
+        __Pyx_INCREF(__pyx_t_8);
+        __Pyx_INCREF(function);
+        __Pyx_DECREF_SET(__pyx_t_3, function);
+        __pyx_t_9 = 1;
+      }
+    }
+    __pyx_t_10 = PyTuple_New(5+__pyx_t_9); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_10);
+    if (__pyx_t_8) {
+      __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_8); __pyx_t_8 = NULL;
+    }
     __Pyx_GIVEREF(__pyx_t_4);
-    PyTuple_SET_ITEM(__pyx_t_7, 3, __pyx_t_6);
+    PyTuple_SET_ITEM(__pyx_t_10, 0+__pyx_t_9, __pyx_t_4);
+    __Pyx_GIVEREF(__pyx_t_5);
+    PyTuple_SET_ITEM(__pyx_t_10, 1+__pyx_t_9, __pyx_t_5);
     __Pyx_GIVEREF(__pyx_t_6);
+    PyTuple_SET_ITEM(__pyx_t_10, 2+__pyx_t_9, __pyx_t_6);
+    __Pyx_GIVEREF(__pyx_t_7);
+    PyTuple_SET_ITEM(__pyx_t_10, 3+__pyx_t_9, __pyx_t_7);
     __Pyx_INCREF(__pyx_n_s_D);
-    PyTuple_SET_ITEM(__pyx_t_7, 4, __pyx_n_s_D);
     __Pyx_GIVEREF(__pyx_n_s_D);
-    __pyx_t_3 = 0;
-    __pyx_t_5 = 0;
+    PyTuple_SET_ITEM(__pyx_t_10, 4+__pyx_t_9, __pyx_n_s_D);
     __pyx_t_4 = 0;
+    __pyx_t_5 = 0;
     __pyx_t_6 = 0;
-    __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 333; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_6);
-    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-    __pyx_r = __pyx_t_6;
-    __pyx_t_6 = 0;
+    __pyx_t_7 = 0;
+    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_10, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+    __pyx_r = __pyx_t_1;
+    __pyx_t_1 = 0;
     goto __pyx_L0;
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":337
+  /* "skbio/alignment/_ssw_wrapper.pyx":322
  *                                               self.query_begin, self.query_end,
  *                                               "D")
  *         return None             # <<<<<<<<<<<<<<
@@ -3383,7 +3321,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_r = Py_None;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":318
+  /* "skbio/alignment/_ssw_wrapper.pyx":303
  * 
  *     @property
  *     def aligned_query_sequence(self):             # <<<<<<<<<<<<<<
@@ -3399,6 +3337,8 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __Pyx_XDECREF(__pyx_t_5);
   __Pyx_XDECREF(__pyx_t_6);
   __Pyx_XDECREF(__pyx_t_7);
+  __Pyx_XDECREF(__pyx_t_8);
+  __Pyx_XDECREF(__pyx_t_10);
   __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.aligned_query_sequence", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __pyx_r = NULL;
   __pyx_L0:;
@@ -3407,7 +3347,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":340
+/* "skbio/alignment/_ssw_wrapper.pyx":325
  * 
  *     @property
  *     def aligned_target_sequence(self):             # <<<<<<<<<<<<<<
@@ -3439,25 +3379,28 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   PyObject *__pyx_t_5 = NULL;
   PyObject *__pyx_t_6 = NULL;
   PyObject *__pyx_t_7 = NULL;
+  PyObject *__pyx_t_8 = NULL;
+  Py_ssize_t __pyx_t_9;
+  PyObject *__pyx_t_10 = NULL;
   int __pyx_lineno = 0;
   const char *__pyx_filename = NULL;
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("aligned_target_sequence", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":354
+  /* "skbio/alignment/_ssw_wrapper.pyx":339
  * 
  *         """
  *         if self.target_sequence:             # <<<<<<<<<<<<<<
  *             return self._get_aligned_sequence(self.target_sequence,
  *                                               self._tuples_from_cigar(),
  */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 354; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 354; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   if (__pyx_t_2) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":355
+    /* "skbio/alignment/_ssw_wrapper.pyx":340
  *         """
  *         if self.target_sequence:
  *             return self._get_aligned_sequence(self.target_sequence,             # <<<<<<<<<<<<<<
@@ -3465,78 +3408,100 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *                                               self.target_begin,
  */
     __Pyx_XDECREF(__pyx_r);
-    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_get_aligned_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 355; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_1);
-    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_sequence); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 355; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_get_aligned_sequence); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
+    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_sequence); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":356
+    /* "skbio/alignment/_ssw_wrapper.pyx":341
  *         if self.target_sequence:
  *             return self._get_aligned_sequence(self.target_sequence,
  *                                               self._tuples_from_cigar(),             # <<<<<<<<<<<<<<
  *                                               self.target_begin,
  *                                               self.target_end_optimal,
  */
-    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_tuples_from_cigar); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 356; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_empty_tuple, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 356; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_tuples_from_cigar); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_6);
+    __pyx_t_7 = NULL;
+    if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_6))) {
+      __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6);
+      if (likely(__pyx_t_7)) {
+        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
+        __Pyx_INCREF(__pyx_t_7);
+        __Pyx_INCREF(function);
+        __Pyx_DECREF_SET(__pyx_t_6, function);
+      }
+    }
+    if (__pyx_t_7) {
+      __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_7); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+    } else {
+      __pyx_t_5 = __Pyx_PyObject_CallNoArg(__pyx_t_6); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
     __Pyx_GOTREF(__pyx_t_5);
-    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":357
+    /* "skbio/alignment/_ssw_wrapper.pyx":342
  *             return self._get_aligned_sequence(self.target_sequence,
  *                                               self._tuples_from_cigar(),
  *                                               self.target_begin,             # <<<<<<<<<<<<<<
  *                                               self.target_end_optimal,
  *                                               "I")
  */
-    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_begin); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 357; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_4);
+    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_begin); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 342; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_6);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":358
+    /* "skbio/alignment/_ssw_wrapper.pyx":343
  *                                               self._tuples_from_cigar(),
  *                                               self.target_begin,
  *                                               self.target_end_optimal,             # <<<<<<<<<<<<<<
  *                                               "I")
  *         return None
  */
-    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_end_optimal); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 358; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_6);
-
-    /* "skbio/alignment/_ssw_wrapper.pyx":355
- *         """
- *         if self.target_sequence:
- *             return self._get_aligned_sequence(self.target_sequence,             # <<<<<<<<<<<<<<
- *                                               self._tuples_from_cigar(),
- *                                               self.target_begin,
- */
-    __pyx_t_7 = PyTuple_New(5); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 355; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_target_end_optimal); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 343; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_7);
-    PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3);
-    __Pyx_GIVEREF(__pyx_t_3);
-    PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_5);
-    __Pyx_GIVEREF(__pyx_t_5);
-    PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_t_4);
+    __pyx_t_8 = NULL;
+    __pyx_t_9 = 0;
+    if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_3))) {
+      __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_3);
+      if (likely(__pyx_t_8)) {
+        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
+        __Pyx_INCREF(__pyx_t_8);
+        __Pyx_INCREF(function);
+        __Pyx_DECREF_SET(__pyx_t_3, function);
+        __pyx_t_9 = 1;
+      }
+    }
+    __pyx_t_10 = PyTuple_New(5+__pyx_t_9); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_10);
+    if (__pyx_t_8) {
+      __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_8); __pyx_t_8 = NULL;
+    }
     __Pyx_GIVEREF(__pyx_t_4);
-    PyTuple_SET_ITEM(__pyx_t_7, 3, __pyx_t_6);
+    PyTuple_SET_ITEM(__pyx_t_10, 0+__pyx_t_9, __pyx_t_4);
+    __Pyx_GIVEREF(__pyx_t_5);
+    PyTuple_SET_ITEM(__pyx_t_10, 1+__pyx_t_9, __pyx_t_5);
     __Pyx_GIVEREF(__pyx_t_6);
+    PyTuple_SET_ITEM(__pyx_t_10, 2+__pyx_t_9, __pyx_t_6);
+    __Pyx_GIVEREF(__pyx_t_7);
+    PyTuple_SET_ITEM(__pyx_t_10, 3+__pyx_t_9, __pyx_t_7);
     __Pyx_INCREF(__pyx_n_s_I);
-    PyTuple_SET_ITEM(__pyx_t_7, 4, __pyx_n_s_I);
     __Pyx_GIVEREF(__pyx_n_s_I);
-    __pyx_t_3 = 0;
-    __pyx_t_5 = 0;
+    PyTuple_SET_ITEM(__pyx_t_10, 4+__pyx_t_9, __pyx_n_s_I);
     __pyx_t_4 = 0;
+    __pyx_t_5 = 0;
     __pyx_t_6 = 0;
-    __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 355; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_6);
-    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-    __pyx_r = __pyx_t_6;
-    __pyx_t_6 = 0;
+    __pyx_t_7 = 0;
+    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_10, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+    __pyx_r = __pyx_t_1;
+    __pyx_t_1 = 0;
     goto __pyx_L0;
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":360
+  /* "skbio/alignment/_ssw_wrapper.pyx":345
  *                                               self.target_end_optimal,
  *                                               "I")
  *         return None             # <<<<<<<<<<<<<<
@@ -3548,7 +3513,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_r = Py_None;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":340
+  /* "skbio/alignment/_ssw_wrapper.pyx":325
  * 
  *     @property
  *     def aligned_target_sequence(self):             # <<<<<<<<<<<<<<
@@ -3564,6 +3529,8 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __Pyx_XDECREF(__pyx_t_5);
   __Pyx_XDECREF(__pyx_t_6);
   __Pyx_XDECREF(__pyx_t_7);
+  __Pyx_XDECREF(__pyx_t_8);
+  __Pyx_XDECREF(__pyx_t_10);
   __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure.aligned_target_sequence", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __pyx_r = NULL;
   __pyx_L0:;
@@ -3572,7 +3539,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":362
+/* "skbio/alignment/_ssw_wrapper.pyx":347
  *         return None
  * 
  *     def set_zero_based(self, is_zero_based):             # <<<<<<<<<<<<<<
@@ -3603,17 +3570,17 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("set_zero_based", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":366
+  /* "skbio/alignment/_ssw_wrapper.pyx":351
  * 
  *         """
  *         if is_zero_based:             # <<<<<<<<<<<<<<
  *             self.index_starts_at = 0
  *         else:
  */
-  __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_is_zero_based); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_is_zero_based); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 351; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   if (__pyx_t_1) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":367
+    /* "skbio/alignment/_ssw_wrapper.pyx":352
  *         """
  *         if is_zero_based:
  *             self.index_starts_at = 0             # <<<<<<<<<<<<<<
@@ -3625,7 +3592,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   }
   /*else*/ {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":369
+    /* "skbio/alignment/_ssw_wrapper.pyx":354
  *             self.index_starts_at = 0
  *         else:
  *             self.index_starts_at = 1             # <<<<<<<<<<<<<<
@@ -3636,7 +3603,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   }
   __pyx_L3:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":362
+  /* "skbio/alignment/_ssw_wrapper.pyx":347
  *         return None
  * 
  *     def set_zero_based(self, is_zero_based):             # <<<<<<<<<<<<<<
@@ -3656,7 +3623,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":371
+/* "skbio/alignment/_ssw_wrapper.pyx":356
  *             self.index_starts_at = 1
  * 
  *     def is_zero_based(self):             # <<<<<<<<<<<<<<
@@ -3687,7 +3654,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("is_zero_based", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":380
+  /* "skbio/alignment/_ssw_wrapper.pyx":365
  * 
  *         """
  *         return self.index_starts_at == 0             # <<<<<<<<<<<<<<
@@ -3695,13 +3662,13 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *     def _get_aligned_sequence(self, sequence, tuple_cigar, begin, end,
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyBool_FromLong((__pyx_v_self->index_starts_at == 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 380; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyBool_FromLong((__pyx_v_self->index_starts_at == 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 365; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":371
+  /* "skbio/alignment/_ssw_wrapper.pyx":356
  *             self.index_starts_at = 1
  * 
  *     def is_zero_based(self):             # <<<<<<<<<<<<<<
@@ -3720,7 +3687,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":382
+/* "skbio/alignment/_ssw_wrapper.pyx":367
  *         return self.index_starts_at == 0
  * 
  *     def _get_aligned_sequence(self, sequence, tuple_cigar, begin, end,             # <<<<<<<<<<<<<<
@@ -3765,26 +3732,26 @@ static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
         case  1:
         if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_tuple_cigar)) != 0)) kw_args--;
         else {
-          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
         }
         case  2:
         if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_begin)) != 0)) kw_args--;
         else {
-          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
         }
         case  3:
         if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_end)) != 0)) kw_args--;
         else {
-          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
         }
         case  4:
         if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_gap_type)) != 0)) kw_args--;
         else {
-          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+          __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
         }
       }
       if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_get_aligned_sequence") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_get_aligned_sequence") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
     } else if (PyTuple_GET_SIZE(__pyx_args) != 5) {
       goto __pyx_L5_argtuple_error;
@@ -3803,7 +3770,7 @@ static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   }
   goto __pyx_L4_argument_unpacking_done;
   __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __Pyx_RaiseArgtupleInvalid("_get_aligned_sequence", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
   __pyx_L3_error:;
   __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure._get_aligned_sequence", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __Pyx_RefNannyFinishContext();
@@ -3828,9 +3795,9 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __Pyx_RefNannyDeclarations
   PyObject *__pyx_t_1 = NULL;
   PyObject *__pyx_t_2 = NULL;
-  Py_ssize_t __pyx_t_3;
-  PyObject *(*__pyx_t_4)(PyObject *);
-  PyObject *__pyx_t_5 = NULL;
+  PyObject *__pyx_t_3 = NULL;
+  Py_ssize_t __pyx_t_4;
+  PyObject *(*__pyx_t_5)(PyObject *);
   PyObject *__pyx_t_6 = NULL;
   PyObject *__pyx_t_7 = NULL;
   PyObject *(*__pyx_t_8)(PyObject *);
@@ -3842,63 +3809,78 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("_get_aligned_sequence", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":385
+  /* "skbio/alignment/_ssw_wrapper.pyx":370
  *                               gap_type):
  *         # Save the original index scheme and then set it to 0 (1/2)
  *         orig_z_base = self.is_zero_based()             # <<<<<<<<<<<<<<
  *         self.set_zero_based(True)
  *         aligned_sequence = []
  */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_is_zero_based); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_empty_tuple, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_is_zero_based); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_v_orig_z_base = __pyx_t_2;
-  __pyx_t_2 = 0;
+  __pyx_t_3 = NULL;
+  if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_2))) {
+    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
+    if (likely(__pyx_t_3)) {
+      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+      __Pyx_INCREF(__pyx_t_3);
+      __Pyx_INCREF(function);
+      __Pyx_DECREF_SET(__pyx_t_2, function);
+    }
+  }
+  if (__pyx_t_3) {
+    __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  } else {
+    __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  }
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __pyx_v_orig_z_base = __pyx_t_1;
+  __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":386
+  /* "skbio/alignment/_ssw_wrapper.pyx":371
  *         # Save the original index scheme and then set it to 0 (1/2)
  *         orig_z_base = self.is_zero_based()
  *         self.set_zero_based(True)             # <<<<<<<<<<<<<<
  *         aligned_sequence = []
  *         seq = sequence[begin:end + 1]
  */
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_set_zero_based); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_set_zero_based); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 371; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 371; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":387
+  /* "skbio/alignment/_ssw_wrapper.pyx":372
  *         orig_z_base = self.is_zero_based()
  *         self.set_zero_based(True)
  *         aligned_sequence = []             # <<<<<<<<<<<<<<
  *         seq = sequence[begin:end + 1]
  *         index = 0
  */
-  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_v_aligned_sequence = ((PyObject*)__pyx_t_1);
-  __pyx_t_1 = 0;
+  __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __pyx_v_aligned_sequence = ((PyObject*)__pyx_t_2);
+  __pyx_t_2 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":388
+  /* "skbio/alignment/_ssw_wrapper.pyx":373
  *         self.set_zero_based(True)
  *         aligned_sequence = []
  *         seq = sequence[begin:end + 1]             # <<<<<<<<<<<<<<
  *         index = 0
  *         for length, mid in tuple_cigar:
  */
-  __pyx_t_1 = PyNumber_Add(__pyx_v_end, __pyx_int_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 388; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_sequence, 0, 0, &__pyx_v_begin, &__pyx_t_1, NULL, 0, 0, 1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 388; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = PyNumber_Add(__pyx_v_end, __pyx_int_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_v_seq = __pyx_t_2;
-  __pyx_t_2 = 0;
+  __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_sequence, 0, 0, &__pyx_v_begin, &__pyx_t_2, NULL, 0, 0, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __pyx_v_seq = __pyx_t_1;
+  __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":389
+  /* "skbio/alignment/_ssw_wrapper.pyx":374
  *         aligned_sequence = []
  *         seq = sequence[begin:end + 1]
  *         index = 0             # <<<<<<<<<<<<<<
@@ -3908,50 +3890,54 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __Pyx_INCREF(__pyx_int_0);
   __pyx_v_index = __pyx_int_0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":390
+  /* "skbio/alignment/_ssw_wrapper.pyx":375
  *         seq = sequence[begin:end + 1]
  *         index = 0
  *         for length, mid in tuple_cigar:             # <<<<<<<<<<<<<<
  *             if mid == 'M':
  *                 aligned_sequence += [seq[i]
  */
-  if (PyList_CheckExact(__pyx_v_tuple_cigar) || PyTuple_CheckExact(__pyx_v_tuple_cigar)) {
-    __pyx_t_2 = __pyx_v_tuple_cigar; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
-    __pyx_t_4 = NULL;
+  if (likely(PyList_CheckExact(__pyx_v_tuple_cigar)) || PyTuple_CheckExact(__pyx_v_tuple_cigar)) {
+    __pyx_t_1 = __pyx_v_tuple_cigar; __Pyx_INCREF(__pyx_t_1); __pyx_t_4 = 0;
+    __pyx_t_5 = NULL;
   } else {
-    __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_tuple_cigar); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_2);
-    __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext;
+    __pyx_t_4 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_tuple_cigar); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __pyx_t_5 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   for (;;) {
-    if (!__pyx_t_4 && PyList_CheckExact(__pyx_t_2)) {
-      if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
-      #if CYTHON_COMPILING_IN_CPYTHON
-      __pyx_t_1 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_1); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #else
-      __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #endif
-    } else if (!__pyx_t_4 && PyTuple_CheckExact(__pyx_t_2)) {
-      if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
-      #if CYTHON_COMPILING_IN_CPYTHON
-      __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_1); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #else
-      __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #endif
+    if (likely(!__pyx_t_5)) {
+      if (likely(PyList_CheckExact(__pyx_t_1))) {
+        if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_1)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_2 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); __pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_2);
+        #endif
+      } else {
+        if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); __pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_2);
+        #endif
+      }
     } else {
-      __pyx_t_1 = __pyx_t_4(__pyx_t_2);
-      if (unlikely(!__pyx_t_1)) {
+      __pyx_t_2 = __pyx_t_5(__pyx_t_1);
+      if (unlikely(!__pyx_t_2)) {
         PyObject* exc_type = PyErr_Occurred();
         if (exc_type) {
           if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         }
         break;
       }
-      __Pyx_GOTREF(__pyx_t_1);
+      __Pyx_GOTREF(__pyx_t_2);
     }
-    if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) {
-      PyObject* sequence = __pyx_t_1;
+    if ((likely(PyTuple_CheckExact(__pyx_t_2))) || (PyList_CheckExact(__pyx_t_2))) {
+      PyObject* sequence = __pyx_t_2;
       #if CYTHON_COMPILING_IN_CPYTHON
       Py_ssize_t size = Py_SIZE(sequence);
       #else
@@ -3960,36 +3946,36 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
       if (unlikely(size != 2)) {
         if (size > 2) __Pyx_RaiseTooManyValuesError(2);
         else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
-        {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       }
       #if CYTHON_COMPILING_IN_CPYTHON
       if (likely(PyTuple_CheckExact(sequence))) {
-        __pyx_t_5 = PyTuple_GET_ITEM(sequence, 0); 
+        __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); 
         __pyx_t_6 = PyTuple_GET_ITEM(sequence, 1); 
       } else {
-        __pyx_t_5 = PyList_GET_ITEM(sequence, 0); 
+        __pyx_t_3 = PyList_GET_ITEM(sequence, 0); 
         __pyx_t_6 = PyList_GET_ITEM(sequence, 1); 
       }
-      __Pyx_INCREF(__pyx_t_5);
+      __Pyx_INCREF(__pyx_t_3);
       __Pyx_INCREF(__pyx_t_6);
       #else
-      __pyx_t_5 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_5);
-      __pyx_t_6 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_6 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_6);
       #endif
-      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
     } else {
       Py_ssize_t index = -1;
-      __pyx_t_7 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_7 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_7);
-      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
       __pyx_t_8 = Py_TYPE(__pyx_t_7)->tp_iternext;
-      index = 0; __pyx_t_5 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_5)) goto __pyx_L5_unpacking_failed;
-      __Pyx_GOTREF(__pyx_t_5);
+      index = 0; __pyx_t_3 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_3)) goto __pyx_L5_unpacking_failed;
+      __Pyx_GOTREF(__pyx_t_3);
       index = 1; __pyx_t_6 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_6)) goto __pyx_L5_unpacking_failed;
       __Pyx_GOTREF(__pyx_t_6);
-      if (__Pyx_IternextUnpackEndCheck(__pyx_t_8(__pyx_t_7), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      if (__Pyx_IternextUnpackEndCheck(__pyx_t_8(__pyx_t_7), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __pyx_t_8 = NULL;
       __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
       goto __pyx_L6_unpacking_done;
@@ -3997,85 +3983,89 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
       __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
       __pyx_t_8 = NULL;
       if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
-      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __pyx_L6_unpacking_done:;
     }
-    __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_5);
-    __pyx_t_5 = 0;
+    __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_3);
+    __pyx_t_3 = 0;
     __Pyx_XDECREF_SET(__pyx_v_mid, __pyx_t_6);
     __pyx_t_6 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":391
+    /* "skbio/alignment/_ssw_wrapper.pyx":376
  *         index = 0
  *         for length, mid in tuple_cigar:
  *             if mid == 'M':             # <<<<<<<<<<<<<<
  *                 aligned_sequence += [seq[i]
  *                                      for i in range(index, length + index)]
  */
-    __pyx_t_9 = (__Pyx_PyString_Equals(__pyx_v_mid, __pyx_n_s_M, Py_EQ)); if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 391; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_9 = (__Pyx_PyString_Equals(__pyx_v_mid, __pyx_n_s_M, Py_EQ)); if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     if (__pyx_t_9) {
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":392
+      /* "skbio/alignment/_ssw_wrapper.pyx":377
  *         for length, mid in tuple_cigar:
  *             if mid == 'M':
  *                 aligned_sequence += [seq[i]             # <<<<<<<<<<<<<<
  *                                      for i in range(index, length + index)]
  *                 index += length
  */
-      __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_1);
+      __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_2);
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":393
+      /* "skbio/alignment/_ssw_wrapper.pyx":378
  *             if mid == 'M':
  *                 aligned_sequence += [seq[i]
  *                                      for i in range(index, length + index)]             # <<<<<<<<<<<<<<
  *                 index += length
  *             elif mid == gap_type:
  */
-      __pyx_t_6 = PyNumber_Add(__pyx_v_length, __pyx_v_index); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = PyNumber_Add(__pyx_v_length, __pyx_v_index); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_6);
-      __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_5);
+      __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
       __Pyx_INCREF(__pyx_v_index);
-      PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_index);
       __Pyx_GIVEREF(__pyx_v_index);
-      PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_6);
+      PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
       __Pyx_GIVEREF(__pyx_t_6);
+      PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_6);
       __pyx_t_6 = 0;
-      __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_range, __pyx_t_5, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_range, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_6);
-      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-      if (PyList_CheckExact(__pyx_t_6) || PyTuple_CheckExact(__pyx_t_6)) {
-        __pyx_t_5 = __pyx_t_6; __Pyx_INCREF(__pyx_t_5); __pyx_t_10 = 0;
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      if (likely(PyList_CheckExact(__pyx_t_6)) || PyTuple_CheckExact(__pyx_t_6)) {
+        __pyx_t_3 = __pyx_t_6; __Pyx_INCREF(__pyx_t_3); __pyx_t_10 = 0;
         __pyx_t_11 = NULL;
       } else {
-        __pyx_t_10 = -1; __pyx_t_5 = PyObject_GetIter(__pyx_t_6); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        __Pyx_GOTREF(__pyx_t_5);
-        __pyx_t_11 = Py_TYPE(__pyx_t_5)->tp_iternext;
+        __pyx_t_10 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_t_6); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_3);
+        __pyx_t_11 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       }
       __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
       for (;;) {
-        if (!__pyx_t_11 && PyList_CheckExact(__pyx_t_5)) {
-          if (__pyx_t_10 >= PyList_GET_SIZE(__pyx_t_5)) break;
-          #if CYTHON_COMPILING_IN_CPYTHON
-          __pyx_t_6 = PyList_GET_ITEM(__pyx_t_5, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-          #else
-          __pyx_t_6 = PySequence_ITEM(__pyx_t_5, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-          #endif
-        } else if (!__pyx_t_11 && PyTuple_CheckExact(__pyx_t_5)) {
-          if (__pyx_t_10 >= PyTuple_GET_SIZE(__pyx_t_5)) break;
-          #if CYTHON_COMPILING_IN_CPYTHON
-          __pyx_t_6 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-          #else
-          __pyx_t_6 = PySequence_ITEM(__pyx_t_5, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-          #endif
+        if (likely(!__pyx_t_11)) {
+          if (likely(PyList_CheckExact(__pyx_t_3))) {
+            if (__pyx_t_10 >= PyList_GET_SIZE(__pyx_t_3)) break;
+            #if CYTHON_COMPILING_IN_CPYTHON
+            __pyx_t_6 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+            #else
+            __pyx_t_6 = PySequence_ITEM(__pyx_t_3, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+            __Pyx_GOTREF(__pyx_t_6);
+            #endif
+          } else {
+            if (__pyx_t_10 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
+            #if CYTHON_COMPILING_IN_CPYTHON
+            __pyx_t_6 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+            #else
+            __pyx_t_6 = PySequence_ITEM(__pyx_t_3, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+            __Pyx_GOTREF(__pyx_t_6);
+            #endif
+          }
         } else {
-          __pyx_t_6 = __pyx_t_11(__pyx_t_5);
+          __pyx_t_6 = __pyx_t_11(__pyx_t_3);
           if (unlikely(!__pyx_t_6)) {
             PyObject* exc_type = PyErr_Occurred();
             if (exc_type) {
               if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-              else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+              else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
             }
             break;
           }
@@ -4084,177 +4074,221 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
         __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_6);
         __pyx_t_6 = 0;
 
-        /* "skbio/alignment/_ssw_wrapper.pyx":392
+        /* "skbio/alignment/_ssw_wrapper.pyx":377
  *         for length, mid in tuple_cigar:
  *             if mid == 'M':
  *                 aligned_sequence += [seq[i]             # <<<<<<<<<<<<<<
  *                                      for i in range(index, length + index)]
  *                 index += length
  */
-        __pyx_t_6 = PyObject_GetItem(__pyx_v_seq, __pyx_v_i); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+        __pyx_t_6 = PyObject_GetItem(__pyx_v_seq, __pyx_v_i); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
         __Pyx_GOTREF(__pyx_t_6);
-        if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_6))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+
+        /* "skbio/alignment/_ssw_wrapper.pyx":378
+ *             if mid == 'M':
+ *                 aligned_sequence += [seq[i]
+ *                                      for i in range(index, length + index)]             # <<<<<<<<<<<<<<
+ *                 index += length
+ *             elif mid == gap_type:
+ */
       }
-      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-      __pyx_t_5 = PyNumber_InPlaceAdd(__pyx_v_aligned_sequence, __pyx_t_1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_5);
-      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-      __Pyx_DECREF_SET(__pyx_v_aligned_sequence, ((PyObject*)__pyx_t_5));
-      __pyx_t_5 = 0;
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":377
+ *         for length, mid in tuple_cigar:
+ *             if mid == 'M':
+ *                 aligned_sequence += [seq[i]             # <<<<<<<<<<<<<<
+ *                                      for i in range(index, length + index)]
+ *                 index += length
+ */
+      __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_aligned_sequence, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+      __Pyx_DECREF_SET(__pyx_v_aligned_sequence, ((PyObject*)__pyx_t_3));
+      __pyx_t_3 = 0;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":394
+      /* "skbio/alignment/_ssw_wrapper.pyx":379
  *                 aligned_sequence += [seq[i]
  *                                      for i in range(index, length + index)]
  *                 index += length             # <<<<<<<<<<<<<<
  *             elif mid == gap_type:
  *                 aligned_sequence += (['-'] * length)
  */
-      __pyx_t_5 = PyNumber_InPlaceAdd(__pyx_v_index, __pyx_v_length); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_5);
-      __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_5);
-      __pyx_t_5 = 0;
+      __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_index, __pyx_v_length); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 379; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3);
+      __pyx_t_3 = 0;
       goto __pyx_L7;
     }
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":395
+    /* "skbio/alignment/_ssw_wrapper.pyx":380
  *                                      for i in range(index, length + index)]
  *                 index += length
  *             elif mid == gap_type:             # <<<<<<<<<<<<<<
  *                 aligned_sequence += (['-'] * length)
  *             else:
  */
-    __pyx_t_5 = PyObject_RichCompare(__pyx_v_mid, __pyx_v_gap_type, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __pyx_t_9 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+    __pyx_t_3 = PyObject_RichCompare(__pyx_v_mid, __pyx_v_gap_type, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 380; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_9 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 380; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
     if (__pyx_t_9) {
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":396
+      /* "skbio/alignment/_ssw_wrapper.pyx":381
  *                 index += length
  *             elif mid == gap_type:
  *                 aligned_sequence += (['-'] * length)             # <<<<<<<<<<<<<<
  *             else:
  *                 pass
  */
-      __pyx_t_5 = PyList_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_5);
+      __pyx_t_3 = PyList_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 381; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
       __Pyx_INCREF(__pyx_kp_s__8);
-      PyList_SET_ITEM(__pyx_t_5, 0, __pyx_kp_s__8);
       __Pyx_GIVEREF(__pyx_kp_s__8);
-      { PyObject* __pyx_temp = PyNumber_InPlaceMultiply(__pyx_t_5, __pyx_v_length); if (unlikely(!__pyx_temp)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      PyList_SET_ITEM(__pyx_t_3, 0, __pyx_kp_s__8);
+      { PyObject* __pyx_temp = PyNumber_InPlaceMultiply(__pyx_t_3, __pyx_v_length); if (unlikely(!__pyx_temp)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 381; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_temp);
-        __Pyx_DECREF(__pyx_t_5);
-        __pyx_t_5 = __pyx_temp;
+        __Pyx_DECREF(__pyx_t_3);
+        __pyx_t_3 = __pyx_temp;
       }
-      __pyx_t_1 = PyNumber_InPlaceAdd(__pyx_v_aligned_sequence, __pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_1);
-      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-      __Pyx_DECREF_SET(__pyx_v_aligned_sequence, ((PyObject*)__pyx_t_1));
-      __pyx_t_1 = 0;
+      __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_aligned_sequence, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 381; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_2);
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __Pyx_DECREF_SET(__pyx_v_aligned_sequence, ((PyObject*)__pyx_t_2));
+      __pyx_t_2 = 0;
       goto __pyx_L7;
     }
     /*else*/ {
     }
     __pyx_L7:;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":375
+ *         seq = sequence[begin:end + 1]
+ *         index = 0
+ *         for length, mid in tuple_cigar:             # <<<<<<<<<<<<<<
+ *             if mid == 'M':
+ *                 aligned_sequence += [seq[i]
+ */
   }
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":400
+  /* "skbio/alignment/_ssw_wrapper.pyx":385
  *                 pass
  *         # Our sequence end is sometimes beyond the cigar:
  *         aligned_sequence += [seq[i] for i in range(index, end - begin + 1)]             # <<<<<<<<<<<<<<
  *         # Revert our index scheme to the original (2/2)
  *         self.set_zero_based(orig_z_base)
  */
-  __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_1 = PyNumber_Subtract(__pyx_v_end, __pyx_v_begin); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_5 = PyNumber_Add(__pyx_t_1, __pyx_int_1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_5);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = PyNumber_Subtract(__pyx_v_end, __pyx_v_begin); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __pyx_t_3 = PyNumber_Add(__pyx_t_2, __pyx_int_1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
   __Pyx_INCREF(__pyx_v_index);
-  PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_index);
   __Pyx_GIVEREF(__pyx_v_index);
-  PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_5);
-  __Pyx_GIVEREF(__pyx_t_5);
-  __pyx_t_5 = 0;
-  __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_range, __pyx_t_1, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_5);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  if (PyList_CheckExact(__pyx_t_5) || PyTuple_CheckExact(__pyx_t_5)) {
-    __pyx_t_1 = __pyx_t_5; __Pyx_INCREF(__pyx_t_1); __pyx_t_3 = 0;
-    __pyx_t_4 = NULL;
+  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_index);
+  __Pyx_GIVEREF(__pyx_t_3);
+  PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
+  __pyx_t_3 = 0;
+  __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_range, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  if (likely(PyList_CheckExact(__pyx_t_3)) || PyTuple_CheckExact(__pyx_t_3)) {
+    __pyx_t_2 = __pyx_t_3; __Pyx_INCREF(__pyx_t_2); __pyx_t_4 = 0;
+    __pyx_t_5 = NULL;
   } else {
-    __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_1);
-    __pyx_t_4 = Py_TYPE(__pyx_t_1)->tp_iternext;
+    __pyx_t_4 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
+    __pyx_t_5 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
   for (;;) {
-    if (!__pyx_t_4 && PyList_CheckExact(__pyx_t_1)) {
-      if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_1)) break;
-      #if CYTHON_COMPILING_IN_CPYTHON
-      __pyx_t_5 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #else
-      __pyx_t_5 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #endif
-    } else if (!__pyx_t_4 && PyTuple_CheckExact(__pyx_t_1)) {
-      if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
-      #if CYTHON_COMPILING_IN_CPYTHON
-      __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #else
-      __pyx_t_5 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #endif
+    if (likely(!__pyx_t_5)) {
+      if (likely(PyList_CheckExact(__pyx_t_2))) {
+        if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_2)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_3 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_4); __Pyx_INCREF(__pyx_t_3); __pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_3 = PySequence_ITEM(__pyx_t_2, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_3);
+        #endif
+      } else {
+        if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_4); __Pyx_INCREF(__pyx_t_3); __pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_3 = PySequence_ITEM(__pyx_t_2, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_3);
+        #endif
+      }
     } else {
-      __pyx_t_5 = __pyx_t_4(__pyx_t_1);
-      if (unlikely(!__pyx_t_5)) {
+      __pyx_t_3 = __pyx_t_5(__pyx_t_2);
+      if (unlikely(!__pyx_t_3)) {
         PyObject* exc_type = PyErr_Occurred();
         if (exc_type) {
           if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         }
         break;
       }
-      __Pyx_GOTREF(__pyx_t_5);
+      __Pyx_GOTREF(__pyx_t_3);
     }
-    __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_5);
-    __pyx_t_5 = 0;
-    __pyx_t_5 = PyObject_GetItem(__pyx_v_seq, __pyx_v_i); if (unlikely(__pyx_t_5 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
-    __Pyx_GOTREF(__pyx_t_5);
-    if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+    __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_3);
+    __pyx_t_3 = 0;
+    __pyx_t_3 = PyObject_GetItem(__pyx_v_seq, __pyx_v_i); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    __Pyx_GOTREF(__pyx_t_3);
+    if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_3))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
   }
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = PyNumber_InPlaceAdd(__pyx_v_aligned_sequence, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __Pyx_DECREF_SET(__pyx_v_aligned_sequence, ((PyObject*)__pyx_t_1));
-  __pyx_t_1 = 0;
+  __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_aligned_sequence, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __Pyx_DECREF_SET(__pyx_v_aligned_sequence, ((PyObject*)__pyx_t_2));
+  __pyx_t_2 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":402
+  /* "skbio/alignment/_ssw_wrapper.pyx":387
  *         aligned_sequence += [seq[i] for i in range(index, end - begin + 1)]
  *         # Revert our index scheme to the original (2/2)
  *         self.set_zero_based(orig_z_base)             # <<<<<<<<<<<<<<
  *         return "".join(aligned_sequence)
  * 
  */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_set_zero_based); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_set_zero_based); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_INCREF(__pyx_v_orig_z_base);
-  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_orig_z_base);
-  __Pyx_GIVEREF(__pyx_v_orig_z_base);
-  __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_5);
+  __pyx_t_3 = NULL;
+  if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_1))) {
+    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1);
+    if (likely(__pyx_t_3)) {
+      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
+      __Pyx_INCREF(__pyx_t_3);
+      __Pyx_INCREF(function);
+      __Pyx_DECREF_SET(__pyx_t_1, function);
+    }
+  }
+  if (!__pyx_t_3) {
+    __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_orig_z_base); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
+  } else {
+    __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_6);
+    __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); __pyx_t_3 = NULL;
+    __Pyx_INCREF(__pyx_v_orig_z_base);
+    __Pyx_GIVEREF(__pyx_v_orig_z_base);
+    PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_v_orig_z_base);
+    __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
+    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+  }
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":403
+  /* "skbio/alignment/_ssw_wrapper.pyx":388
  *         # Revert our index scheme to the original (2/2)
  *         self.set_zero_based(orig_z_base)
  *         return "".join(aligned_sequence)             # <<<<<<<<<<<<<<
@@ -4262,13 +4296,13 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
  *     def _tuples_from_cigar(self):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_5 = __Pyx_PyString_Join(__pyx_kp_s__6, __pyx_v_aligned_sequence); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_5);
-  __pyx_r = __pyx_t_5;
-  __pyx_t_5 = 0;
+  __pyx_t_2 = __Pyx_PyString_Join(__pyx_kp_s__6, __pyx_v_aligned_sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 388; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __pyx_r = __pyx_t_2;
+  __pyx_t_2 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":382
+  /* "skbio/alignment/_ssw_wrapper.pyx":367
  *         return self.index_starts_at == 0
  * 
  *     def _get_aligned_sequence(self, sequence, tuple_cigar, begin, end,             # <<<<<<<<<<<<<<
@@ -4280,7 +4314,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_L1_error:;
   __Pyx_XDECREF(__pyx_t_1);
   __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_5);
+  __Pyx_XDECREF(__pyx_t_3);
   __Pyx_XDECREF(__pyx_t_6);
   __Pyx_XDECREF(__pyx_t_7);
   __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure._get_aligned_sequence", __pyx_clineno, __pyx_lineno, __pyx_filename);
@@ -4298,7 +4332,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":405
+/* "skbio/alignment/_ssw_wrapper.pyx":390
  *         return "".join(aligned_sequence)
  * 
  *     def _tuples_from_cigar(self):             # <<<<<<<<<<<<<<
@@ -4330,77 +4364,82 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   Py_ssize_t __pyx_t_3;
   PyObject *(*__pyx_t_4)(PyObject *);
   PyObject *__pyx_t_5 = NULL;
-  int __pyx_t_6;
+  PyObject *__pyx_t_6 = NULL;
   int __pyx_t_7;
+  int __pyx_t_8;
   int __pyx_lineno = 0;
   const char *__pyx_filename = NULL;
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("_tuples_from_cigar", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":406
+  /* "skbio/alignment/_ssw_wrapper.pyx":391
  * 
  *     def _tuples_from_cigar(self):
  *         tuples = []             # <<<<<<<<<<<<<<
  *         length_stack = []
  *         for character in self.cigar:
  */
-  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 406; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 391; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_v_tuples = ((PyObject*)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":407
+  /* "skbio/alignment/_ssw_wrapper.pyx":392
  *     def _tuples_from_cigar(self):
  *         tuples = []
  *         length_stack = []             # <<<<<<<<<<<<<<
  *         for character in self.cigar:
  *             if character.isdigit():
  */
-  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 407; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_v_length_stack = ((PyObject*)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":408
+  /* "skbio/alignment/_ssw_wrapper.pyx":393
  *         tuples = []
  *         length_stack = []
  *         for character in self.cigar:             # <<<<<<<<<<<<<<
  *             if character.isdigit():
  *                 length_stack.append(character)
  */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_cigar); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_cigar); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  if (PyList_CheckExact(__pyx_t_1) || PyTuple_CheckExact(__pyx_t_1)) {
+  if (likely(PyList_CheckExact(__pyx_t_1)) || PyTuple_CheckExact(__pyx_t_1)) {
     __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
     __pyx_t_4 = NULL;
   } else {
-    __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
-    __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext;
+    __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   for (;;) {
-    if (!__pyx_t_4 && PyList_CheckExact(__pyx_t_2)) {
-      if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
-      #if CYTHON_COMPILING_IN_CPYTHON
-      __pyx_t_1 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_1); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #else
-      __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #endif
-    } else if (!__pyx_t_4 && PyTuple_CheckExact(__pyx_t_2)) {
-      if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
-      #if CYTHON_COMPILING_IN_CPYTHON
-      __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_1); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #else
-      __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #endif
+    if (likely(!__pyx_t_4)) {
+      if (likely(PyList_CheckExact(__pyx_t_2))) {
+        if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_1 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_1); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_1);
+        #endif
+      } else {
+        if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_1); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_1);
+        #endif
+      }
     } else {
       __pyx_t_1 = __pyx_t_4(__pyx_t_2);
       if (unlikely(!__pyx_t_1)) {
         PyObject* exc_type = PyErr_Occurred();
         if (exc_type) {
           if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         }
         break;
       }
@@ -4409,74 +4448,97 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
     __Pyx_XDECREF_SET(__pyx_v_character, __pyx_t_1);
     __pyx_t_1 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":409
+    /* "skbio/alignment/_ssw_wrapper.pyx":394
  *         length_stack = []
  *         for character in self.cigar:
  *             if character.isdigit():             # <<<<<<<<<<<<<<
  *                 length_stack.append(character)
  *             else:
  */
-    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_character, __pyx_n_s_isdigit); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 409; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_1);
-    __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_empty_tuple, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 409; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_character, __pyx_n_s_isdigit); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_5);
-    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 409; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+    __pyx_t_6 = NULL;
+    if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_5))) {
+      __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
+      if (likely(__pyx_t_6)) {
+        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
+        __Pyx_INCREF(__pyx_t_6);
+        __Pyx_INCREF(function);
+        __Pyx_DECREF_SET(__pyx_t_5, function);
+      }
+    }
     if (__pyx_t_6) {
+      __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+    } else {
+      __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
+    __Pyx_GOTREF(__pyx_t_1);
+    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+    __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+    if (__pyx_t_7) {
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":410
+      /* "skbio/alignment/_ssw_wrapper.pyx":395
  *         for character in self.cigar:
  *             if character.isdigit():
  *                 length_stack.append(character)             # <<<<<<<<<<<<<<
  *             else:
  *                 tuples.append((int("".join(length_stack)), character))
  */
-      __pyx_t_7 = __Pyx_PyList_Append(__pyx_v_length_stack, __pyx_v_character); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_8 = __Pyx_PyList_Append(__pyx_v_length_stack, __pyx_v_character); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       goto __pyx_L5;
     }
     /*else*/ {
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":412
+      /* "skbio/alignment/_ssw_wrapper.pyx":397
  *                 length_stack.append(character)
  *             else:
  *                 tuples.append((int("".join(length_stack)), character))             # <<<<<<<<<<<<<<
  *                 length_stack = []
  *         return tuples
  */
-      __pyx_t_5 = __Pyx_PyString_Join(__pyx_kp_s__6, __pyx_v_length_stack); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_5);
-      __pyx_t_1 = PyNumber_Int(__pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_1 = __Pyx_PyString_Join(__pyx_kp_s__6, __pyx_v_length_stack); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_1);
-      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-      __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_5 = PyNumber_Int(__pyx_t_1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_5);
-      PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1);
-      __Pyx_GIVEREF(__pyx_t_1);
+      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+      __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_1);
+      __Pyx_GIVEREF(__pyx_t_5);
+      PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5);
       __Pyx_INCREF(__pyx_v_character);
-      PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_v_character);
       __Pyx_GIVEREF(__pyx_v_character);
-      __pyx_t_1 = 0;
-      __pyx_t_7 = __Pyx_PyList_Append(__pyx_v_tuples, __pyx_t_5); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+      PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_character);
+      __pyx_t_5 = 0;
+      __pyx_t_8 = __Pyx_PyList_Append(__pyx_v_tuples, __pyx_t_1); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":413
+      /* "skbio/alignment/_ssw_wrapper.pyx":398
  *             else:
  *                 tuples.append((int("".join(length_stack)), character))
  *                 length_stack = []             # <<<<<<<<<<<<<<
  *         return tuples
  * 
  */
-      __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 413; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_5);
-      __Pyx_DECREF_SET(__pyx_v_length_stack, ((PyObject*)__pyx_t_5));
-      __pyx_t_5 = 0;
+      __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_1);
+      __Pyx_DECREF_SET(__pyx_v_length_stack, ((PyObject*)__pyx_t_1));
+      __pyx_t_1 = 0;
     }
     __pyx_L5:;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":393
+ *         tuples = []
+ *         length_stack = []
+ *         for character in self.cigar:             # <<<<<<<<<<<<<<
+ *             if character.isdigit():
+ *                 length_stack.append(character)
+ */
   }
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":414
+  /* "skbio/alignment/_ssw_wrapper.pyx":399
  *                 tuples.append((int("".join(length_stack)), character))
  *                 length_stack = []
  *         return tuples             # <<<<<<<<<<<<<<
@@ -4488,7 +4550,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __pyx_r = __pyx_v_tuples;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":405
+  /* "skbio/alignment/_ssw_wrapper.pyx":390
  *         return "".join(aligned_sequence)
  * 
  *     def _tuples_from_cigar(self):             # <<<<<<<<<<<<<<
@@ -4501,6 +4563,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   __Pyx_XDECREF(__pyx_t_1);
   __Pyx_XDECREF(__pyx_t_2);
   __Pyx_XDECREF(__pyx_t_5);
+  __Pyx_XDECREF(__pyx_t_6);
   __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.AlignmentStructure._tuples_from_cigar", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __pyx_r = NULL;
   __pyx_L0:;
@@ -4512,7 +4575,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":558
+/* "skbio/alignment/_ssw_wrapper.pyx":543
  *     cdef cnp.ndarray __KEEP_IT_IN_SCOPE_matrix
  * 
  *     def __cinit__(self, query_sequence,             # <<<<<<<<<<<<<<
@@ -4553,7 +4616,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
     values[3] = ((PyObject *)__pyx_int_2);
     values[4] = ((PyObject *)__pyx_int_15);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":563
+    /* "skbio/alignment/_ssw_wrapper.pyx":548
  *                   score_size=2,  # BLASTN Default
  *                   mask_length=15,  # Minimum length for a suboptimal alignment
  *                   mask_auto=True,             # <<<<<<<<<<<<<<
@@ -4562,7 +4625,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
  */
     values[5] = ((PyObject *)Py_True);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":564
+    /* "skbio/alignment/_ssw_wrapper.pyx":549
  *                   mask_length=15,  # Minimum length for a suboptimal alignment
  *                   mask_auto=True,
  *                   score_only=False,             # <<<<<<<<<<<<<<
@@ -4571,7 +4634,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
  */
     values[6] = ((PyObject *)Py_False);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":565
+    /* "skbio/alignment/_ssw_wrapper.pyx":550
  *                   mask_auto=True,
  *                   score_only=False,
  *                   score_filter=None,             # <<<<<<<<<<<<<<
@@ -4580,7 +4643,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
  */
     values[7] = ((PyObject *)Py_None);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":566
+    /* "skbio/alignment/_ssw_wrapper.pyx":551
  *                   score_only=False,
  *                   score_filter=None,
  *                   distance_filter=None,             # <<<<<<<<<<<<<<
@@ -4589,7 +4652,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
  */
     values[8] = ((PyObject *)Py_None);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":567
+    /* "skbio/alignment/_ssw_wrapper.pyx":552
  *                   score_filter=None,
  *                   distance_filter=None,
  *                   override_skip_babp=False,             # <<<<<<<<<<<<<<
@@ -4598,7 +4661,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
  */
     values[9] = ((PyObject *)Py_False);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":568
+    /* "skbio/alignment/_ssw_wrapper.pyx":553
  *                   distance_filter=None,
  *                   override_skip_babp=False,
  *                   protein=False,             # <<<<<<<<<<<<<<
@@ -4609,7 +4672,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
     values[11] = ((PyObject *)__pyx_int_2);
     values[12] = ((PyObject *)__pyx_int_neg_3);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":571
+    /* "skbio/alignment/_ssw_wrapper.pyx":556
  *                   match_score=2,  # BLASTN Default
  *                   mismatch_score=-3,  # BLASTN Default
  *                   substitution_matrix=None,             # <<<<<<<<<<<<<<
@@ -4618,7 +4681,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
  */
     values[13] = ((PyObject *)Py_None);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":572
+    /* "skbio/alignment/_ssw_wrapper.pyx":557
  *                   mismatch_score=-3,  # BLASTN Default
  *                   substitution_matrix=None,
  *                   suppress_sequences=False,             # <<<<<<<<<<<<<<
@@ -4627,7 +4690,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
  */
     values[14] = ((PyObject *)Py_False);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":573
+    /* "skbio/alignment/_ssw_wrapper.pyx":558
  *                   substitution_matrix=None,
  *                   suppress_sequences=False,
  *                   zero_index=True):             # <<<<<<<<<<<<<<
@@ -4740,7 +4803,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
         }
       }
       if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 543; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
     } else {
       switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -4783,7 +4846,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
   }
   goto __pyx_L4_argument_unpacking_done;
   __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 1, 16, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 1, 16, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 543; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
   __pyx_L3_error:;
   __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.StripedSmithWaterman.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __Pyx_RefNannyFinishContext();
@@ -4791,7 +4854,7 @@ static int __pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_1__c
   __pyx_L4_argument_unpacking_done:;
   __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___cinit__(((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self), __pyx_v_query_sequence, __pyx_v_gap_open_penalty, __pyx_v_gap_extend_penalty, __pyx_v_score_size, __pyx_v_mask_length, __pyx_v_mask_auto, __pyx_v_score_only, __pyx_v_score_filter, __pyx_v_distance_filter, __pyx_v_override_skip_babp, __pyx_v_protein, __pyx_v_match_score, __pyx_v_mismatch_score, __pyx_v_substitution [...]
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":558
+  /* "skbio/alignment/_ssw_wrapper.pyx":543
  *     cdef cnp.ndarray __KEEP_IT_IN_SCOPE_matrix
  * 
  *     def __cinit__(self, query_sequence,             # <<<<<<<<<<<<<<
@@ -4825,14 +4888,15 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   __pyx_t_5numpy_uint16_t __pyx_t_7;
   PyObject *__pyx_t_8 = NULL;
   PyObject *__pyx_t_9 = NULL;
-  int __pyx_t_10;
-  int __pyx_t_11;
-  PyObject *__pyx_t_12 = NULL;
-  PyObject *__pyx_t_13 = NULL;
-  PyObject *__pyx_t_14 = NULL;
-  Py_ssize_t __pyx_t_15;
-  __pyx_t_5numpy_int8_t __pyx_t_16;
-  int __pyx_lineno = 0;
+  Py_ssize_t __pyx_t_10;
+  PyObject *__pyx_t_11 = NULL;
+  int __pyx_t_12;
+  int __pyx_t_13;
+  PyObject *__pyx_t_14 = NULL;
+  PyObject *__pyx_t_15 = NULL;
+  PyObject *__pyx_t_16 = NULL;
+  __pyx_t_5numpy_int8_t __pyx_t_17;
+  int __pyx_lineno = 0;
   const char *__pyx_filename = NULL;
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("__cinit__", 0);
@@ -4845,14 +4909,14 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   __pyx_pybuffernd_read_seq.data = NULL;
   __pyx_pybuffernd_read_seq.rcbuffer = &__pyx_pybuffer_read_seq;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":575
+  /* "skbio/alignment/_ssw_wrapper.pyx":560
  *                   zero_index=True):
  *         # initalize our values
  *         self.read_sequence = query_sequence             # <<<<<<<<<<<<<<
  *         if gap_open_penalty <= 0:
  *             raise ValueError("`gap_open_penalty` must be > 0")
  */
-  if (!(likely(PyString_CheckExact(__pyx_v_query_sequence))||((__pyx_v_query_sequence) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_v_query_sequence)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 575; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!(likely(PyString_CheckExact(__pyx_v_query_sequence))||((__pyx_v_query_sequence) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(__pyx_v_query_sequence)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 560; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_t_1 = __pyx_v_query_sequence;
   __Pyx_INCREF(__pyx_t_1);
   __Pyx_GIVEREF(__pyx_t_1);
@@ -4861,79 +4925,79 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   __pyx_v_self->read_sequence = ((PyObject*)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":576
+  /* "skbio/alignment/_ssw_wrapper.pyx":561
  *         # initalize our values
  *         self.read_sequence = query_sequence
  *         if gap_open_penalty <= 0:             # <<<<<<<<<<<<<<
  *             raise ValueError("`gap_open_penalty` must be > 0")
  *         self.gap_open_penalty = gap_open_penalty
  */
-  __pyx_t_1 = PyObject_RichCompare(__pyx_v_gap_open_penalty, __pyx_int_0, Py_LE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyObject_RichCompare(__pyx_v_gap_open_penalty, __pyx_int_0, Py_LE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   if (__pyx_t_2) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":577
+    /* "skbio/alignment/_ssw_wrapper.pyx":562
  *         self.read_sequence = query_sequence
  *         if gap_open_penalty <= 0:
  *             raise ValueError("`gap_open_penalty` must be > 0")             # <<<<<<<<<<<<<<
  *         self.gap_open_penalty = gap_open_penalty
  *         if gap_extend_penalty <= 0:
  */
-    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
     __Pyx_Raise(__pyx_t_1, 0, 0, 0);
     __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":578
+  /* "skbio/alignment/_ssw_wrapper.pyx":563
  *         if gap_open_penalty <= 0:
  *             raise ValueError("`gap_open_penalty` must be > 0")
  *         self.gap_open_penalty = gap_open_penalty             # <<<<<<<<<<<<<<
  *         if gap_extend_penalty <= 0:
  *             raise ValueError("`gap_extend_penalty` must be > 0")
  */
-  __pyx_t_3 = __Pyx_PyInt_As_npy_uint8(__pyx_v_gap_open_penalty); if (unlikely((__pyx_t_3 == (npy_uint8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 578; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = __Pyx_PyInt_As_npy_uint8(__pyx_v_gap_open_penalty); if (unlikely((__pyx_t_3 == (npy_uint8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 563; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_v_self->gap_open_penalty = __pyx_t_3;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":579
+  /* "skbio/alignment/_ssw_wrapper.pyx":564
  *             raise ValueError("`gap_open_penalty` must be > 0")
  *         self.gap_open_penalty = gap_open_penalty
  *         if gap_extend_penalty <= 0:             # <<<<<<<<<<<<<<
  *             raise ValueError("`gap_extend_penalty` must be > 0")
  *         self.gap_extend_penalty = gap_extend_penalty
  */
-  __pyx_t_1 = PyObject_RichCompare(__pyx_v_gap_extend_penalty, __pyx_int_0, Py_LE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 579; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 579; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyObject_RichCompare(__pyx_v_gap_extend_penalty, __pyx_int_0, Py_LE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   if (__pyx_t_2) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":580
+    /* "skbio/alignment/_ssw_wrapper.pyx":565
  *         self.gap_open_penalty = gap_open_penalty
  *         if gap_extend_penalty <= 0:
  *             raise ValueError("`gap_extend_penalty` must be > 0")             # <<<<<<<<<<<<<<
  *         self.gap_extend_penalty = gap_extend_penalty
  *         self.distance_filter = 0 if distance_filter is None else \
  */
-    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 565; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
     __Pyx_Raise(__pyx_t_1, 0, 0, 0);
     __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 565; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":581
+  /* "skbio/alignment/_ssw_wrapper.pyx":566
  *         if gap_extend_penalty <= 0:
  *             raise ValueError("`gap_extend_penalty` must be > 0")
  *         self.gap_extend_penalty = gap_extend_penalty             # <<<<<<<<<<<<<<
  *         self.distance_filter = 0 if distance_filter is None else \
  *             distance_filter
  */
-  __pyx_t_3 = __Pyx_PyInt_As_npy_uint8(__pyx_v_gap_extend_penalty); if (unlikely((__pyx_t_3 == (npy_uint8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 581; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = __Pyx_PyInt_As_npy_uint8(__pyx_v_gap_extend_penalty); if (unlikely((__pyx_t_3 == (npy_uint8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 566; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_v_self->gap_extend_penalty = __pyx_t_3;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":582
+  /* "skbio/alignment/_ssw_wrapper.pyx":567
  *             raise ValueError("`gap_extend_penalty` must be > 0")
  *         self.gap_extend_penalty = gap_extend_penalty
  *         self.distance_filter = 0 if distance_filter is None else \             # <<<<<<<<<<<<<<
@@ -4945,18 +5009,18 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
     __pyx_t_4 = 0;
   } else {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":583
+    /* "skbio/alignment/_ssw_wrapper.pyx":568
  *         self.gap_extend_penalty = gap_extend_penalty
  *         self.distance_filter = 0 if distance_filter is None else \
  *             distance_filter             # <<<<<<<<<<<<<<
  *         self.score_filter = 0 if score_filter is None else score_filter
  *         self.suppress_sequences = suppress_sequences
  */
-    __pyx_t_5 = __Pyx_PyInt_As_npy_int32(__pyx_v_distance_filter); if (unlikely((__pyx_t_5 == (npy_int32)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 583; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_5 = __Pyx_PyInt_As_npy_int32(__pyx_v_distance_filter); if (unlikely((__pyx_t_5 == (npy_int32)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __pyx_t_4 = __pyx_t_5;
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":582
+  /* "skbio/alignment/_ssw_wrapper.pyx":567
  *             raise ValueError("`gap_extend_penalty` must be > 0")
  *         self.gap_extend_penalty = gap_extend_penalty
  *         self.distance_filter = 0 if distance_filter is None else \             # <<<<<<<<<<<<<<
@@ -4965,7 +5029,7 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
  */
   __pyx_v_self->distance_filter = __pyx_t_4;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":584
+  /* "skbio/alignment/_ssw_wrapper.pyx":569
  *         self.distance_filter = 0 if distance_filter is None else \
  *             distance_filter
  *         self.score_filter = 0 if score_filter is None else score_filter             # <<<<<<<<<<<<<<
@@ -4976,19 +5040,19 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   if ((__pyx_t_2 != 0)) {
     __pyx_t_6 = 0;
   } else {
-    __pyx_t_7 = __Pyx_PyInt_As_npy_uint16(__pyx_v_score_filter); if (unlikely((__pyx_t_7 == (npy_uint16)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 584; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_7 = __Pyx_PyInt_As_npy_uint16(__pyx_v_score_filter); if (unlikely((__pyx_t_7 == (npy_uint16)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __pyx_t_6 = __pyx_t_7;
   }
   __pyx_v_self->score_filter = __pyx_t_6;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":585
+  /* "skbio/alignment/_ssw_wrapper.pyx":570
  *             distance_filter
  *         self.score_filter = 0 if score_filter is None else score_filter
  *         self.suppress_sequences = suppress_sequences             # <<<<<<<<<<<<<<
  *         self.is_protein = protein
  *         self.bit_flag = self._get_bit_flag(override_skip_babp, score_only)
  */
-  if (!(likely(((__pyx_v_suppress_sequences) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_suppress_sequences, __pyx_ptype_7cpython_4bool_bool))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 585; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!(likely(((__pyx_v_suppress_sequences) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_suppress_sequences, __pyx_ptype_7cpython_4bool_bool))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_t_1 = __pyx_v_suppress_sequences;
   __Pyx_INCREF(__pyx_t_1);
   __Pyx_GIVEREF(__pyx_t_1);
@@ -4997,14 +5061,14 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   __pyx_v_self->suppress_sequences = ((PyBoolObject *)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":586
+  /* "skbio/alignment/_ssw_wrapper.pyx":571
  *         self.score_filter = 0 if score_filter is None else score_filter
  *         self.suppress_sequences = suppress_sequences
  *         self.is_protein = protein             # <<<<<<<<<<<<<<
  *         self.bit_flag = self._get_bit_flag(override_skip_babp, score_only)
  *         # http://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html
  */
-  if (!(likely(((__pyx_v_protein) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_protein, __pyx_ptype_7cpython_4bool_bool))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 586; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!(likely(((__pyx_v_protein) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_protein, __pyx_ptype_7cpython_4bool_bool))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_t_1 = __pyx_v_protein;
   __Pyx_INCREF(__pyx_t_1);
   __Pyx_GIVEREF(__pyx_t_1);
@@ -5013,47 +5077,62 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   __pyx_v_self->is_protein = ((PyBoolObject *)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":587
+  /* "skbio/alignment/_ssw_wrapper.pyx":572
  *         self.suppress_sequences = suppress_sequences
  *         self.is_protein = protein
  *         self.bit_flag = self._get_bit_flag(override_skip_babp, score_only)             # <<<<<<<<<<<<<<
  *         # http://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html
  *         # Dijkstra knows what's up:
  */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_get_bit_flag); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_get_bit_flag); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_8);
+  __pyx_t_9 = NULL;
+  __pyx_t_10 = 0;
+  if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_8))) {
+    __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_8);
+    if (likely(__pyx_t_9)) {
+      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8);
+      __Pyx_INCREF(__pyx_t_9);
+      __Pyx_INCREF(function);
+      __Pyx_DECREF_SET(__pyx_t_8, function);
+      __pyx_t_10 = 1;
+    }
+  }
+  __pyx_t_11 = PyTuple_New(2+__pyx_t_10); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_11);
+  if (__pyx_t_9) {
+    __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_9); __pyx_t_9 = NULL;
+  }
   __Pyx_INCREF(__pyx_v_override_skip_babp);
-  PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_override_skip_babp);
   __Pyx_GIVEREF(__pyx_v_override_skip_babp);
+  PyTuple_SET_ITEM(__pyx_t_11, 0+__pyx_t_10, __pyx_v_override_skip_babp);
   __Pyx_INCREF(__pyx_v_score_only);
-  PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_v_score_only);
   __Pyx_GIVEREF(__pyx_v_score_only);
-  __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_8, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_9);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  PyTuple_SET_ITEM(__pyx_t_11, 1+__pyx_t_10, __pyx_v_score_only);
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_11, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
   __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
-  __pyx_t_3 = __Pyx_PyInt_As_npy_uint8(__pyx_t_9); if (unlikely((__pyx_t_3 == (npy_uint8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+  __pyx_t_3 = __Pyx_PyInt_As_npy_uint8(__pyx_t_1); if (unlikely((__pyx_t_3 == (npy_uint8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_self->bit_flag = __pyx_t_3;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":590
+  /* "skbio/alignment/_ssw_wrapper.pyx":575
  *         # http://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html
  *         # Dijkstra knows what's up:
  *         self.index_starts_at = 0 if zero_index else 1             # <<<<<<<<<<<<<<
  *         # set up our matrix
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] matrix
  */
-  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_zero_index); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 590; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_zero_index); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 575; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   if (__pyx_t_2) {
-    __pyx_t_10 = 0;
+    __pyx_t_12 = 0;
   } else {
-    __pyx_t_10 = 1;
+    __pyx_t_12 = 1;
   }
-  __pyx_v_self->index_starts_at = __pyx_t_10;
+  __pyx_v_self->index_starts_at = __pyx_t_12;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":593
+  /* "skbio/alignment/_ssw_wrapper.pyx":578
  *         # set up our matrix
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] matrix
  *         if substitution_matrix is None:             # <<<<<<<<<<<<<<
@@ -5061,137 +5140,137 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
  *                 raise Exception("Must provide a substitution matrix for"
  */
   __pyx_t_2 = (__pyx_v_substitution_matrix == Py_None);
-  __pyx_t_11 = (__pyx_t_2 != 0);
-  if (__pyx_t_11) {
+  __pyx_t_13 = (__pyx_t_2 != 0);
+  if (__pyx_t_13) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":594
+    /* "skbio/alignment/_ssw_wrapper.pyx":579
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] matrix
  *         if substitution_matrix is None:
  *             if protein:             # <<<<<<<<<<<<<<
  *                 raise Exception("Must provide a substitution matrix for"
  *                                 " protein sequences")
  */
-    __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_v_protein); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    if (__pyx_t_11) {
+    __pyx_t_13 = __Pyx_PyObject_IsTrue(__pyx_v_protein); if (unlikely(__pyx_t_13 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 579; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (__pyx_t_13) {
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":595
+      /* "skbio/alignment/_ssw_wrapper.pyx":580
  *         if substitution_matrix is None:
  *             if protein:
  *                 raise Exception("Must provide a substitution matrix for"             # <<<<<<<<<<<<<<
  *                                 " protein sequences")
  *             matrix = self._build_match_matrix(match_score, mismatch_score)
  */
-      __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_Exception, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_9);
-      __Pyx_Raise(__pyx_t_9, 0, 0, 0);
-      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_Exception, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_1);
+      __Pyx_Raise(__pyx_t_1, 0, 0, 0);
+      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":597
+    /* "skbio/alignment/_ssw_wrapper.pyx":582
  *                 raise Exception("Must provide a substitution matrix for"
  *                                 " protein sequences")
  *             matrix = self._build_match_matrix(match_score, mismatch_score)             # <<<<<<<<<<<<<<
  *         else:
  *             matrix = self._convert_dict2d_to_matrix(substitution_matrix)
  */
-    __pyx_t_9 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_build_match_matrix(__pyx_v_self, __pyx_v_match_score, __pyx_v_mismatch_score)); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 597; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_9);
+    __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_build_match_matrix(__pyx_v_self, __pyx_v_match_score, __pyx_v_mismatch_score)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 582; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
     {
       __Pyx_BufFmt_StackElem __pyx_stack[1];
       __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_matrix.rcbuffer->pybuffer);
-      __pyx_t_10 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_matrix.rcbuffer->pybuffer, (PyObject*)((PyArrayObject *)__pyx_t_9), &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 1, 0, __pyx_stack);
-      if (unlikely(__pyx_t_10 < 0)) {
-        PyErr_Fetch(&__pyx_t_12, &__pyx_t_13, &__pyx_t_14);
+      __pyx_t_12 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_matrix.rcbuffer->pybuffer, (PyObject*)((PyArrayObject *)__pyx_t_1), &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 1, 0, __pyx_stack);
+      if (unlikely(__pyx_t_12 < 0)) {
+        PyErr_Fetch(&__pyx_t_14, &__pyx_t_15, &__pyx_t_16);
         if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_matrix.rcbuffer->pybuffer, (PyObject*)__pyx_v_matrix, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 1, 0, __pyx_stack) == -1)) {
-          Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_14);
+          Py_XDECREF(__pyx_t_14); Py_XDECREF(__pyx_t_15); Py_XDECREF(__pyx_t_16);
           __Pyx_RaiseBufferFallbackError();
         } else {
-          PyErr_Restore(__pyx_t_12, __pyx_t_13, __pyx_t_14);
+          PyErr_Restore(__pyx_t_14, __pyx_t_15, __pyx_t_16);
         }
       }
       __pyx_pybuffernd_matrix.diminfo[0].strides = __pyx_pybuffernd_matrix.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_matrix.diminfo[0].shape = __pyx_pybuffernd_matrix.rcbuffer->pybuffer.shape[0];
-      if (unlikely(__pyx_t_10 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 597; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      if (unlikely(__pyx_t_12 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 582; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
-    __pyx_v_matrix = ((PyArrayObject *)__pyx_t_9);
-    __pyx_t_9 = 0;
+    __pyx_v_matrix = ((PyArrayObject *)__pyx_t_1);
+    __pyx_t_1 = 0;
     goto __pyx_L5;
   }
   /*else*/ {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":599
+    /* "skbio/alignment/_ssw_wrapper.pyx":584
  *             matrix = self._build_match_matrix(match_score, mismatch_score)
  *         else:
  *             matrix = self._convert_dict2d_to_matrix(substitution_matrix)             # <<<<<<<<<<<<<<
  *         # Set up our mask_length
  *         # Mask is recommended to be max(query_sequence/2, 15)
  */
-    __pyx_t_9 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_convert_dict2d_to_matrix(__pyx_v_self, __pyx_v_substitution_matrix)); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_9);
+    __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_convert_dict2d_to_matrix(__pyx_v_self, __pyx_v_substitution_matrix)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 584; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
     {
       __Pyx_BufFmt_StackElem __pyx_stack[1];
       __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_matrix.rcbuffer->pybuffer);
-      __pyx_t_10 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_matrix.rcbuffer->pybuffer, (PyObject*)((PyArrayObject *)__pyx_t_9), &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 1, 0, __pyx_stack);
-      if (unlikely(__pyx_t_10 < 0)) {
-        PyErr_Fetch(&__pyx_t_14, &__pyx_t_13, &__pyx_t_12);
+      __pyx_t_12 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_matrix.rcbuffer->pybuffer, (PyObject*)((PyArrayObject *)__pyx_t_1), &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 1, 0, __pyx_stack);
+      if (unlikely(__pyx_t_12 < 0)) {
+        PyErr_Fetch(&__pyx_t_16, &__pyx_t_15, &__pyx_t_14);
         if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_matrix.rcbuffer->pybuffer, (PyObject*)__pyx_v_matrix, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 1, 0, __pyx_stack) == -1)) {
-          Py_XDECREF(__pyx_t_14); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_12);
+          Py_XDECREF(__pyx_t_16); Py_XDECREF(__pyx_t_15); Py_XDECREF(__pyx_t_14);
           __Pyx_RaiseBufferFallbackError();
         } else {
-          PyErr_Restore(__pyx_t_14, __pyx_t_13, __pyx_t_12);
+          PyErr_Restore(__pyx_t_16, __pyx_t_15, __pyx_t_14);
         }
       }
       __pyx_pybuffernd_matrix.diminfo[0].strides = __pyx_pybuffernd_matrix.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_matrix.diminfo[0].shape = __pyx_pybuffernd_matrix.rcbuffer->pybuffer.shape[0];
-      if (unlikely(__pyx_t_10 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      if (unlikely(__pyx_t_12 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 584; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
-    __pyx_v_matrix = ((PyArrayObject *)__pyx_t_9);
-    __pyx_t_9 = 0;
+    __pyx_v_matrix = ((PyArrayObject *)__pyx_t_1);
+    __pyx_t_1 = 0;
   }
   __pyx_L5:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":602
+  /* "skbio/alignment/_ssw_wrapper.pyx":587
  *         # Set up our mask_length
  *         # Mask is recommended to be max(query_sequence/2, 15)
  *         if mask_auto:             # <<<<<<<<<<<<<<
  *             self.mask_length = len(query_sequence) / 2
  *             if self.mask_length < mask_length:
  */
-  __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_v_mask_auto); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 602; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  if (__pyx_t_11) {
+  __pyx_t_13 = __Pyx_PyObject_IsTrue(__pyx_v_mask_auto); if (unlikely(__pyx_t_13 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__pyx_t_13) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":603
+    /* "skbio/alignment/_ssw_wrapper.pyx":588
  *         # Mask is recommended to be max(query_sequence/2, 15)
  *         if mask_auto:
  *             self.mask_length = len(query_sequence) / 2             # <<<<<<<<<<<<<<
  *             if self.mask_length < mask_length:
  *                 self.mask_length = mask_length
  */
-    __pyx_t_15 = PyObject_Length(__pyx_v_query_sequence); if (unlikely(__pyx_t_15 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __pyx_v_self->mask_length = __Pyx_div_Py_ssize_t(__pyx_t_15, 2);
+    __pyx_t_10 = PyObject_Length(__pyx_v_query_sequence); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_v_self->mask_length = __Pyx_div_Py_ssize_t(__pyx_t_10, 2);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":604
+    /* "skbio/alignment/_ssw_wrapper.pyx":589
  *         if mask_auto:
  *             self.mask_length = len(query_sequence) / 2
  *             if self.mask_length < mask_length:             # <<<<<<<<<<<<<<
  *                 self.mask_length = mask_length
  *         else:
  */
-    __pyx_t_9 = __Pyx_PyInt_From_npy_int32(__pyx_v_self->mask_length); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_9);
-    __pyx_t_8 = PyObject_RichCompare(__pyx_t_9, __pyx_v_mask_length, Py_LT); __Pyx_XGOTREF(__pyx_t_8); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-    __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = __Pyx_PyInt_From_npy_int32(__pyx_v_self->mask_length); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 589; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __pyx_t_8 = PyObject_RichCompare(__pyx_t_1, __pyx_v_mask_length, Py_LT); __Pyx_XGOTREF(__pyx_t_8); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 589; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+    __pyx_t_13 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely(__pyx_t_13 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 589; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
-    if (__pyx_t_11) {
+    if (__pyx_t_13) {
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":605
+      /* "skbio/alignment/_ssw_wrapper.pyx":590
  *             self.mask_length = len(query_sequence) / 2
  *             if self.mask_length < mask_length:
  *                 self.mask_length = mask_length             # <<<<<<<<<<<<<<
  *         else:
  *             self.mask_length = mask_length
  */
-      __pyx_t_4 = __Pyx_PyInt_As_npy_int32(__pyx_v_mask_length); if (unlikely((__pyx_t_4 == (npy_int32)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 605; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_As_npy_int32(__pyx_v_mask_length); if (unlikely((__pyx_t_4 == (npy_int32)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 590; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __pyx_v_self->mask_length = __pyx_t_4;
       goto __pyx_L8;
     }
@@ -5200,82 +5279,82 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   }
   /*else*/ {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":607
+    /* "skbio/alignment/_ssw_wrapper.pyx":592
  *                 self.mask_length = mask_length
  *         else:
  *             self.mask_length = mask_length             # <<<<<<<<<<<<<<
  * 
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] read_seq
  */
-    __pyx_t_4 = __Pyx_PyInt_As_npy_int32(__pyx_v_mask_length); if (unlikely((__pyx_t_4 == (npy_int32)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 607; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = __Pyx_PyInt_As_npy_int32(__pyx_v_mask_length); if (unlikely((__pyx_t_4 == (npy_int32)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __pyx_v_self->mask_length = __pyx_t_4;
   }
   __pyx_L7:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":610
+  /* "skbio/alignment/_ssw_wrapper.pyx":595
  * 
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] read_seq
  *         read_seq = self._seq_converter(query_sequence)             # <<<<<<<<<<<<<<
  * 
  *         cdef cnp.int32_t read_length
  */
-  __pyx_t_8 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_seq_converter(__pyx_v_self, __pyx_v_query_sequence)); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 610; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_8 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_seq_converter(__pyx_v_self, __pyx_v_query_sequence)); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_8);
   {
     __Pyx_BufFmt_StackElem __pyx_stack[1];
     __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_read_seq.rcbuffer->pybuffer);
-    __pyx_t_10 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_read_seq.rcbuffer->pybuffer, (PyObject*)((PyArrayObject *)__pyx_t_8), &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 1, 0, __pyx_stack);
-    if (unlikely(__pyx_t_10 < 0)) {
-      PyErr_Fetch(&__pyx_t_12, &__pyx_t_13, &__pyx_t_14);
+    __pyx_t_12 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_read_seq.rcbuffer->pybuffer, (PyObject*)((PyArrayObject *)__pyx_t_8), &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 1, 0, __pyx_stack);
+    if (unlikely(__pyx_t_12 < 0)) {
+      PyErr_Fetch(&__pyx_t_14, &__pyx_t_15, &__pyx_t_16);
       if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_read_seq.rcbuffer->pybuffer, (PyObject*)__pyx_v_read_seq, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 1, 0, __pyx_stack) == -1)) {
-        Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_14);
+        Py_XDECREF(__pyx_t_14); Py_XDECREF(__pyx_t_15); Py_XDECREF(__pyx_t_16);
         __Pyx_RaiseBufferFallbackError();
       } else {
-        PyErr_Restore(__pyx_t_12, __pyx_t_13, __pyx_t_14);
+        PyErr_Restore(__pyx_t_14, __pyx_t_15, __pyx_t_16);
       }
     }
     __pyx_pybuffernd_read_seq.diminfo[0].strides = __pyx_pybuffernd_read_seq.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_read_seq.diminfo[0].shape = __pyx_pybuffernd_read_seq.rcbuffer->pybuffer.shape[0];
-    if (unlikely(__pyx_t_10 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 610; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (unlikely(__pyx_t_12 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __pyx_v_read_seq = ((PyArrayObject *)__pyx_t_8);
   __pyx_t_8 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":613
+  /* "skbio/alignment/_ssw_wrapper.pyx":598
  * 
  *         cdef cnp.int32_t read_length
  *         read_length = len(query_sequence)             # <<<<<<<<<<<<<<
  * 
  *         cdef cnp.int8_t s_size
  */
-  __pyx_t_15 = PyObject_Length(__pyx_v_query_sequence); if (unlikely(__pyx_t_15 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 613; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_v_read_length = __pyx_t_15;
+  __pyx_t_10 = PyObject_Length(__pyx_v_query_sequence); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 598; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_v_read_length = __pyx_t_10;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":616
+  /* "skbio/alignment/_ssw_wrapper.pyx":601
  * 
  *         cdef cnp.int8_t s_size
  *         s_size = score_size             # <<<<<<<<<<<<<<
  * 
  *         cdef cnp.int32_t m_width
  */
-  __pyx_t_16 = __Pyx_PyInt_As_npy_int8(__pyx_v_score_size); if (unlikely((__pyx_t_16 == (npy_int8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_v_s_size = __pyx_t_16;
+  __pyx_t_17 = __Pyx_PyInt_As_npy_int8(__pyx_v_score_size); if (unlikely((__pyx_t_17 == (npy_int8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 601; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_v_s_size = __pyx_t_17;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":619
+  /* "skbio/alignment/_ssw_wrapper.pyx":604
  * 
  *         cdef cnp.int32_t m_width
  *         m_width = 24 if self.is_protein else 5             # <<<<<<<<<<<<<<
  * 
  *         cdef s_profile* p
  */
-  __pyx_t_11 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->is_protein)); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 619; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  if (__pyx_t_11) {
+  __pyx_t_13 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->is_protein)); if (unlikely(__pyx_t_13 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__pyx_t_13) {
     __pyx_t_4 = 24;
   } else {
     __pyx_t_4 = 5;
   }
   __pyx_v_m_width = __pyx_t_4;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":622
+  /* "skbio/alignment/_ssw_wrapper.pyx":607
  * 
  *         cdef s_profile* p
  *         self.profile = ssw_init(<cnp.int8_t*> read_seq.data,             # <<<<<<<<<<<<<<
@@ -5284,7 +5363,7 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
  */
   __pyx_v_self->profile = ssw_init(((__pyx_t_5numpy_int8_t *)__pyx_v_read_seq->data), __pyx_v_read_length, ((__pyx_t_5numpy_int8_t *)__pyx_v_matrix->data), __pyx_v_m_width, __pyx_v_s_size);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":629
+  /* "skbio/alignment/_ssw_wrapper.pyx":614
  * 
  *         # A hack to keep the python GC from eating our data
  *         self.__KEEP_IT_IN_SCOPE_read = read_seq             # <<<<<<<<<<<<<<
@@ -5297,7 +5376,7 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   __Pyx_DECREF(((PyObject *)__pyx_v_self->__pyx___KEEP_IT_IN_SCOPE_read));
   __pyx_v_self->__pyx___KEEP_IT_IN_SCOPE_read = ((PyArrayObject *)__pyx_v_read_seq);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":630
+  /* "skbio/alignment/_ssw_wrapper.pyx":615
  *         # A hack to keep the python GC from eating our data
  *         self.__KEEP_IT_IN_SCOPE_read = read_seq
  *         self.__KEEP_IT_IN_SCOPE_matrix = matrix             # <<<<<<<<<<<<<<
@@ -5310,7 +5389,7 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   __Pyx_DECREF(((PyObject *)__pyx_v_self->__pyx___KEEP_IT_IN_SCOPE_matrix));
   __pyx_v_self->__pyx___KEEP_IT_IN_SCOPE_matrix = ((PyArrayObject *)__pyx_v_matrix);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":558
+  /* "skbio/alignment/_ssw_wrapper.pyx":543
  *     cdef cnp.ndarray __KEEP_IT_IN_SCOPE_matrix
  * 
  *     def __cinit__(self, query_sequence,             # <<<<<<<<<<<<<<
@@ -5325,6 +5404,7 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   __Pyx_XDECREF(__pyx_t_1);
   __Pyx_XDECREF(__pyx_t_8);
   __Pyx_XDECREF(__pyx_t_9);
+  __Pyx_XDECREF(__pyx_t_11);
   { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
     __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
     __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_matrix.rcbuffer->pybuffer);
@@ -5343,7 +5423,7 @@ static int __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman___ci
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":632
+/* "skbio/alignment/_ssw_wrapper.pyx":617
  *         self.__KEEP_IT_IN_SCOPE_matrix = matrix
  * 
  *     def __call__(self, target_sequence):             # <<<<<<<<<<<<<<
@@ -5383,7 +5463,7 @@ static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
         else goto __pyx_L5_argtuple_error;
       }
       if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__call__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 632; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__call__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 617; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
     } else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
       goto __pyx_L5_argtuple_error;
@@ -5394,7 +5474,7 @@ static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   }
   goto __pyx_L4_argument_unpacking_done;
   __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("__call__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 632; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __Pyx_RaiseArgtupleInvalid("__call__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 617; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
   __pyx_L3_error:;
   __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.StripedSmithWaterman.__call__", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __Pyx_RefNannyFinishContext();
@@ -5434,7 +5514,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   __pyx_pybuffernd_reference.data = NULL;
   __pyx_pybuffernd_reference.rcbuffer = &__pyx_pybuffer_reference;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":645
+  /* "skbio/alignment/_ssw_wrapper.pyx":630
  * 
  *         """
  *         reference_sequence = target_sequence             # <<<<<<<<<<<<<<
@@ -5444,14 +5524,14 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   __Pyx_INCREF(__pyx_v_target_sequence);
   __pyx_v_reference_sequence = __pyx_v_target_sequence;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":647
+  /* "skbio/alignment/_ssw_wrapper.pyx":632
  *         reference_sequence = target_sequence
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] reference
  *         reference = self._seq_converter(reference_sequence)             # <<<<<<<<<<<<<<
  * 
  *         cdef cnp.int32_t ref_length
  */
-  __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_seq_converter(__pyx_v_self, __pyx_v_reference_sequence)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 647; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_seq_converter(__pyx_v_self, __pyx_v_reference_sequence)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 632; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   {
     __Pyx_BufFmt_StackElem __pyx_stack[1];
@@ -5467,22 +5547,22 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
       }
     }
     __pyx_pybuffernd_reference.diminfo[0].strides = __pyx_pybuffernd_reference.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_reference.diminfo[0].shape = __pyx_pybuffernd_reference.rcbuffer->pybuffer.shape[0];
-    if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 647; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 632; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __pyx_v_reference = ((PyArrayObject *)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":650
+  /* "skbio/alignment/_ssw_wrapper.pyx":635
  * 
  *         cdef cnp.int32_t ref_length
  *         ref_length = len(reference_sequence)             # <<<<<<<<<<<<<<
  * 
  *         cdef s_align *align
  */
-  __pyx_t_6 = PyObject_Length(__pyx_v_reference_sequence); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 650; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_6 = PyObject_Length(__pyx_v_reference_sequence); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_v_ref_length = __pyx_t_6;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":653
+  /* "skbio/alignment/_ssw_wrapper.pyx":638
  * 
  *         cdef s_align *align
  *         align = ssw_align(self.profile, <cnp.int8_t*> reference.data,             # <<<<<<<<<<<<<<
@@ -5491,37 +5571,37 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
  */
   __pyx_v_align = ssw_align(__pyx_v_self->profile, ((__pyx_t_5numpy_int8_t *)__pyx_v_reference->data), __pyx_v_ref_length, __pyx_v_self->gap_open_penalty, __pyx_v_self->gap_extend_penalty, __pyx_v_self->bit_flag, __pyx_v_self->score_filter, __pyx_v_self->distance_filter, __pyx_v_self->mask_length);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":660
+  /* "skbio/alignment/_ssw_wrapper.pyx":645
  * 
  *         # Cython won't let me do this correctly, so duplicate code ahoy:
  *         if self.suppress_sequences:             # <<<<<<<<<<<<<<
  *             alignment = AlignmentStructure("", "", self.index_starts_at)
  *         else:
  */
-  __pyx_t_7 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->suppress_sequences)); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_7 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->suppress_sequences)); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   if (__pyx_t_7) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":661
+    /* "skbio/alignment/_ssw_wrapper.pyx":646
  *         # Cython won't let me do this correctly, so duplicate code ahoy:
  *         if self.suppress_sequences:
  *             alignment = AlignmentStructure("", "", self.index_starts_at)             # <<<<<<<<<<<<<<
  *         else:
  *             alignment = AlignmentStructure(self.read_sequence,
  */
-    __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->index_starts_at); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->index_starts_at); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
-    __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_8);
     __Pyx_INCREF(__pyx_kp_s__6);
-    PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_kp_s__6);
     __Pyx_GIVEREF(__pyx_kp_s__6);
+    PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_kp_s__6);
     __Pyx_INCREF(__pyx_kp_s__6);
-    PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_kp_s__6);
     __Pyx_GIVEREF(__pyx_kp_s__6);
-    PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_1);
+    PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_kp_s__6);
     __Pyx_GIVEREF(__pyx_t_1);
+    PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_1);
     __pyx_t_1 = 0;
-    __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure)), __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure)), __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
     __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
     __pyx_v_alignment = ((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_t_1);
@@ -5530,35 +5610,35 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   }
   /*else*/ {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":665
+    /* "skbio/alignment/_ssw_wrapper.pyx":650
  *             alignment = AlignmentStructure(self.read_sequence,
  *                                            reference_sequence,
  *                                            self.index_starts_at)             # <<<<<<<<<<<<<<
  *         alignment.__constructor(align)  # Hack to get a pointer through
  *         return alignment
  */
-    __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->index_starts_at); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 665; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->index_starts_at); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 650; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":663
+    /* "skbio/alignment/_ssw_wrapper.pyx":648
  *             alignment = AlignmentStructure("", "", self.index_starts_at)
  *         else:
  *             alignment = AlignmentStructure(self.read_sequence,             # <<<<<<<<<<<<<<
  *                                            reference_sequence,
  *                                            self.index_starts_at)
  */
-    __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 663; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 648; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_8);
     __Pyx_INCREF(__pyx_v_self->read_sequence);
-    PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_self->read_sequence);
     __Pyx_GIVEREF(__pyx_v_self->read_sequence);
+    PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_self->read_sequence);
     __Pyx_INCREF(__pyx_v_reference_sequence);
-    PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_v_reference_sequence);
     __Pyx_GIVEREF(__pyx_v_reference_sequence);
-    PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_1);
+    PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_v_reference_sequence);
     __Pyx_GIVEREF(__pyx_t_1);
+    PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_1);
     __pyx_t_1 = 0;
-    __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure)), __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 663; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure)), __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 648; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
     __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
     __pyx_v_alignment = ((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_t_1);
@@ -5566,18 +5646,18 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   }
   __pyx_L3:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":666
+  /* "skbio/alignment/_ssw_wrapper.pyx":651
  *                                            reference_sequence,
  *                                            self.index_starts_at)
  *         alignment.__constructor(align)  # Hack to get a pointer through             # <<<<<<<<<<<<<<
  *         return alignment
  * 
  */
-  __pyx_t_1 = ((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_alignment->__pyx_vtab)->__pyx___constructor(__pyx_v_alignment, __pyx_v_align); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 666; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = ((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure *)__pyx_v_alignment->__pyx_vtab)->__pyx___constructor(__pyx_v_alignment, __pyx_v_align); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 651; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":667
+  /* "skbio/alignment/_ssw_wrapper.pyx":652
  *                                            self.index_starts_at)
  *         alignment.__constructor(align)  # Hack to get a pointer through
  *         return alignment             # <<<<<<<<<<<<<<
@@ -5589,7 +5669,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   __pyx_r = ((PyObject *)__pyx_v_alignment);
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":632
+  /* "skbio/alignment/_ssw_wrapper.pyx":617
  *         self.__KEEP_IT_IN_SCOPE_matrix = matrix
  * 
  *     def __call__(self, target_sequence):             # <<<<<<<<<<<<<<
@@ -5619,7 +5699,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":669
+/* "skbio/alignment/_ssw_wrapper.pyx":654
  *         return alignment
  * 
  *     def __dealloc__(self):             # <<<<<<<<<<<<<<
@@ -5643,7 +5723,7 @@ static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_4__
   int __pyx_t_1;
   __Pyx_RefNannySetupContext("__dealloc__", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":670
+  /* "skbio/alignment/_ssw_wrapper.pyx":655
  * 
  *     def __dealloc__(self):
  *         if self.profile is not NULL:             # <<<<<<<<<<<<<<
@@ -5653,7 +5733,7 @@ static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_4__
   __pyx_t_1 = ((__pyx_v_self->profile != NULL) != 0);
   if (__pyx_t_1) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":671
+    /* "skbio/alignment/_ssw_wrapper.pyx":656
  *     def __dealloc__(self):
  *         if self.profile is not NULL:
  *             init_destroy(self.profile)             # <<<<<<<<<<<<<<
@@ -5665,7 +5745,7 @@ static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_4__
   }
   __pyx_L3:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":669
+  /* "skbio/alignment/_ssw_wrapper.pyx":654
  *         return alignment
  * 
  *     def __dealloc__(self):             # <<<<<<<<<<<<<<
@@ -5677,7 +5757,7 @@ static void __pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_4__
   __Pyx_RefNannyFinishContext();
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":673
+/* "skbio/alignment/_ssw_wrapper.pyx":658
  *             init_destroy(self.profile)
  * 
  *     def _get_bit_flag(self, override_skip_babp, score_only):             # <<<<<<<<<<<<<<
@@ -5716,11 +5796,11 @@ static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
         case  1:
         if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_score_only)) != 0)) kw_args--;
         else {
-          __Pyx_RaiseArgtupleInvalid("_get_bit_flag", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 673; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+          __Pyx_RaiseArgtupleInvalid("_get_bit_flag", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 658; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
         }
       }
       if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_get_bit_flag") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 673; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_get_bit_flag") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 658; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
     } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
       goto __pyx_L5_argtuple_error;
@@ -5733,7 +5813,7 @@ static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   }
   goto __pyx_L4_argument_unpacking_done;
   __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("_get_bit_flag", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 673; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __Pyx_RaiseArgtupleInvalid("_get_bit_flag", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 658; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
   __pyx_L3_error:;
   __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.StripedSmithWaterman._get_bit_flag", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __Pyx_RefNannyFinishContext();
@@ -5757,7 +5837,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("_get_bit_flag", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":674
+  /* "skbio/alignment/_ssw_wrapper.pyx":659
  * 
  *     def _get_bit_flag(self, override_skip_babp, score_only):
  *         bit_flag = 0             # <<<<<<<<<<<<<<
@@ -5766,17 +5846,17 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
  */
   __pyx_v_bit_flag = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":675
+  /* "skbio/alignment/_ssw_wrapper.pyx":660
  *     def _get_bit_flag(self, override_skip_babp, score_only):
  *         bit_flag = 0
  *         if score_only:             # <<<<<<<<<<<<<<
  *             return bit_flag
  *         if override_skip_babp:
  */
-  __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_score_only); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_score_only); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   if (__pyx_t_1) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":676
+    /* "skbio/alignment/_ssw_wrapper.pyx":661
  *         bit_flag = 0
  *         if score_only:
  *             return bit_flag             # <<<<<<<<<<<<<<
@@ -5784,24 +5864,24 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
  *             bit_flag = bit_flag | 0x8
  */
     __Pyx_XDECREF(__pyx_r);
-    __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v_bit_flag); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v_bit_flag); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
     __pyx_r = __pyx_t_2;
     __pyx_t_2 = 0;
     goto __pyx_L0;
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":677
+  /* "skbio/alignment/_ssw_wrapper.pyx":662
  *         if score_only:
  *             return bit_flag
  *         if override_skip_babp:             # <<<<<<<<<<<<<<
  *             bit_flag = bit_flag | 0x8
  *         if self.distance_filter != 0:
  */
-  __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_override_skip_babp); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 677; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_override_skip_babp); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 662; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   if (__pyx_t_1) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":678
+    /* "skbio/alignment/_ssw_wrapper.pyx":663
  *             return bit_flag
  *         if override_skip_babp:
  *             bit_flag = bit_flag | 0x8             # <<<<<<<<<<<<<<
@@ -5813,7 +5893,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   }
   __pyx_L4:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":679
+  /* "skbio/alignment/_ssw_wrapper.pyx":664
  *         if override_skip_babp:
  *             bit_flag = bit_flag | 0x8
  *         if self.distance_filter != 0:             # <<<<<<<<<<<<<<
@@ -5823,7 +5903,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   __pyx_t_1 = ((__pyx_v_self->distance_filter != 0) != 0);
   if (__pyx_t_1) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":680
+    /* "skbio/alignment/_ssw_wrapper.pyx":665
  *             bit_flag = bit_flag | 0x8
  *         if self.distance_filter != 0:
  *             bit_flag = bit_flag | 0x4             # <<<<<<<<<<<<<<
@@ -5835,7 +5915,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   }
   __pyx_L5:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":681
+  /* "skbio/alignment/_ssw_wrapper.pyx":666
  *         if self.distance_filter != 0:
  *             bit_flag = bit_flag | 0x4
  *         if self.score_filter != 0:             # <<<<<<<<<<<<<<
@@ -5845,7 +5925,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   __pyx_t_1 = ((__pyx_v_self->score_filter != 0) != 0);
   if (__pyx_t_1) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":682
+    /* "skbio/alignment/_ssw_wrapper.pyx":667
  *             bit_flag = bit_flag | 0x4
  *         if self.score_filter != 0:
  *             bit_flag = bit_flag | 0x2             # <<<<<<<<<<<<<<
@@ -5857,7 +5937,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   }
   __pyx_L6:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":683
+  /* "skbio/alignment/_ssw_wrapper.pyx":668
  *         if self.score_filter != 0:
  *             bit_flag = bit_flag | 0x2
  *         if bit_flag == 0 or bit_flag == 8:             # <<<<<<<<<<<<<<
@@ -5868,7 +5948,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
     case 0:
     case 8:
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":684
+    /* "skbio/alignment/_ssw_wrapper.pyx":669
  *             bit_flag = bit_flag | 0x2
  *         if bit_flag == 0 or bit_flag == 8:
  *             bit_flag = bit_flag | 0x1             # <<<<<<<<<<<<<<
@@ -5880,7 +5960,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
     default: break;
   }
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":685
+  /* "skbio/alignment/_ssw_wrapper.pyx":670
  *         if bit_flag == 0 or bit_flag == 8:
  *             bit_flag = bit_flag | 0x1
  *         return bit_flag             # <<<<<<<<<<<<<<
@@ -5888,13 +5968,13 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] _seq_converter(
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v_bit_flag); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 685; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v_bit_flag); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 670; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __pyx_r = __pyx_t_2;
   __pyx_t_2 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":673
+  /* "skbio/alignment/_ssw_wrapper.pyx":658
  *             init_destroy(self.profile)
  * 
  *     def _get_bit_flag(self, override_skip_babp, score_only):             # <<<<<<<<<<<<<<
@@ -5913,7 +5993,7 @@ static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterma
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":687
+/* "skbio/alignment/_ssw_wrapper.pyx":672
  *         return bit_flag
  * 
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] _seq_converter(             # <<<<<<<<<<<<<<
@@ -5951,41 +6031,41 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   __pyx_pybuffernd_seq.data = NULL;
   __pyx_pybuffernd_seq.rcbuffer = &__pyx_pybuffer_seq;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":691
+  /* "skbio/alignment/_ssw_wrapper.pyx":676
  *             sequence):
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] seq
  *         seq = np.empty(len(sequence), dtype=np.int8)             # <<<<<<<<<<<<<<
  *         if self.is_protein:
  *             for i, char in enumerate(sequence):
  */
-  __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_empty); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_empty); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_3 = PyObject_Length(__pyx_v_sequence); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_t_1 = PyInt_FromSsize_t(__pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = PyObject_Length(__pyx_v_sequence); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyInt_FromSsize_t(__pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
-  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
   __Pyx_GIVEREF(__pyx_t_1);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
   __pyx_t_1 = 0;
-  __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_5);
-  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_int8); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_int8); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_6);
   __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_6) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_6) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, __pyx_t_1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, __pyx_t_1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_6);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_t_7 = ((PyArrayObject *)__pyx_t_6);
   {
     __Pyx_BufFmt_StackElem __pyx_stack[1];
@@ -6001,23 +6081,23 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
       }
     }
     __pyx_pybuffernd_seq.diminfo[0].strides = __pyx_pybuffernd_seq.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_seq.diminfo[0].shape = __pyx_pybuffernd_seq.rcbuffer->pybuffer.shape[0];
-    if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __pyx_t_7 = 0;
   __pyx_v_seq = ((PyArrayObject *)__pyx_t_6);
   __pyx_t_6 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":692
+  /* "skbio/alignment/_ssw_wrapper.pyx":677
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] seq
  *         seq = np.empty(len(sequence), dtype=np.int8)
  *         if self.is_protein:             # <<<<<<<<<<<<<<
  *             for i, char in enumerate(sequence):
  *                 seq[i] = np_aa_table[ord(char)]
  */
-  __pyx_t_12 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->is_protein)); if (unlikely(__pyx_t_12 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_12 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->is_protein)); if (unlikely(__pyx_t_12 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 677; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   if (__pyx_t_12) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":693
+    /* "skbio/alignment/_ssw_wrapper.pyx":678
  *         seq = np.empty(len(sequence), dtype=np.int8)
  *         if self.is_protein:
  *             for i, char in enumerate(sequence):             # <<<<<<<<<<<<<<
@@ -6026,36 +6106,40 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
  */
     __Pyx_INCREF(__pyx_int_0);
     __pyx_t_6 = __pyx_int_0;
-    if (PyList_CheckExact(__pyx_v_sequence) || PyTuple_CheckExact(__pyx_v_sequence)) {
+    if (likely(PyList_CheckExact(__pyx_v_sequence)) || PyTuple_CheckExact(__pyx_v_sequence)) {
       __pyx_t_1 = __pyx_v_sequence; __Pyx_INCREF(__pyx_t_1); __pyx_t_3 = 0;
       __pyx_t_13 = NULL;
     } else {
-      __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_1);
-      __pyx_t_13 = Py_TYPE(__pyx_t_1)->tp_iternext;
+      __pyx_t_13 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
     for (;;) {
-      if (!__pyx_t_13 && PyList_CheckExact(__pyx_t_1)) {
-        if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_1)) break;
-        #if CYTHON_COMPILING_IN_CPYTHON
-        __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_4); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        #else
-        __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        #endif
-      } else if (!__pyx_t_13 && PyTuple_CheckExact(__pyx_t_1)) {
-        if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
-        #if CYTHON_COMPILING_IN_CPYTHON
-        __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_4); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        #else
-        __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        #endif
+      if (likely(!__pyx_t_13)) {
+        if (likely(PyList_CheckExact(__pyx_t_1))) {
+          if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_1)) break;
+          #if CYTHON_COMPILING_IN_CPYTHON
+          __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_4); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          #else
+          __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __Pyx_GOTREF(__pyx_t_4);
+          #endif
+        } else {
+          if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
+          #if CYTHON_COMPILING_IN_CPYTHON
+          __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_4); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          #else
+          __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __Pyx_GOTREF(__pyx_t_4);
+          #endif
+        }
       } else {
         __pyx_t_4 = __pyx_t_13(__pyx_t_1);
         if (unlikely(!__pyx_t_4)) {
           PyObject* exc_type = PyErr_Occurred();
           if (exc_type) {
             if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           }
           break;
         }
@@ -6065,35 +6149,43 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
       __pyx_t_4 = 0;
       __Pyx_INCREF(__pyx_t_6);
       __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_6);
-      __pyx_t_4 = PyNumber_Add(__pyx_t_6, __pyx_int_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyNumber_Add(__pyx_t_6, __pyx_int_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
       __Pyx_DECREF(__pyx_t_6);
       __pyx_t_6 = __pyx_t_4;
       __pyx_t_4 = 0;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":694
+      /* "skbio/alignment/_ssw_wrapper.pyx":679
  *         if self.is_protein:
  *             for i, char in enumerate(sequence):
  *                 seq[i] = np_aa_table[ord(char)]             # <<<<<<<<<<<<<<
  *         else:
  *             for i, char in enumerate(sequence):
  */
-      __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np_aa_table); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 694; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np_aa_table); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 694; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_2);
       __Pyx_INCREF(__pyx_v_char);
-      PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_char);
       __Pyx_GIVEREF(__pyx_v_char);
-      __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ord, __pyx_t_2, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 694; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_char);
+      __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ord, __pyx_t_2, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_5);
       __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-      __pyx_t_2 = PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(__pyx_t_2 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 694; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+      __pyx_t_2 = PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(__pyx_t_2 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
       __Pyx_GOTREF(__pyx_t_2);
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-      if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_seq), __pyx_v_i, __pyx_t_2) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 694; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_seq), __pyx_v_i, __pyx_t_2) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":678
+ *         seq = np.empty(len(sequence), dtype=np.int8)
+ *         if self.is_protein:
+ *             for i, char in enumerate(sequence):             # <<<<<<<<<<<<<<
+ *                 seq[i] = np_aa_table[ord(char)]
+ *         else:
+ */
     }
     __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
     __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
@@ -6101,7 +6193,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   }
   /*else*/ {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":696
+    /* "skbio/alignment/_ssw_wrapper.pyx":681
  *                 seq[i] = np_aa_table[ord(char)]
  *         else:
  *             for i, char in enumerate(sequence):             # <<<<<<<<<<<<<<
@@ -6110,36 +6202,40 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
  */
     __Pyx_INCREF(__pyx_int_0);
     __pyx_t_6 = __pyx_int_0;
-    if (PyList_CheckExact(__pyx_v_sequence) || PyTuple_CheckExact(__pyx_v_sequence)) {
+    if (likely(PyList_CheckExact(__pyx_v_sequence)) || PyTuple_CheckExact(__pyx_v_sequence)) {
       __pyx_t_1 = __pyx_v_sequence; __Pyx_INCREF(__pyx_t_1); __pyx_t_3 = 0;
       __pyx_t_13 = NULL;
     } else {
-      __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_1);
-      __pyx_t_13 = Py_TYPE(__pyx_t_1)->tp_iternext;
+      __pyx_t_13 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
     for (;;) {
-      if (!__pyx_t_13 && PyList_CheckExact(__pyx_t_1)) {
-        if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_1)) break;
-        #if CYTHON_COMPILING_IN_CPYTHON
-        __pyx_t_2 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        #else
-        __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        #endif
-      } else if (!__pyx_t_13 && PyTuple_CheckExact(__pyx_t_1)) {
-        if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
-        #if CYTHON_COMPILING_IN_CPYTHON
-        __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        #else
-        __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        #endif
+      if (likely(!__pyx_t_13)) {
+        if (likely(PyList_CheckExact(__pyx_t_1))) {
+          if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_1)) break;
+          #if CYTHON_COMPILING_IN_CPYTHON
+          __pyx_t_2 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          #else
+          __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __Pyx_GOTREF(__pyx_t_2);
+          #endif
+        } else {
+          if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
+          #if CYTHON_COMPILING_IN_CPYTHON
+          __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          #else
+          __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __Pyx_GOTREF(__pyx_t_2);
+          #endif
+        }
       } else {
         __pyx_t_2 = __pyx_t_13(__pyx_t_1);
         if (unlikely(!__pyx_t_2)) {
           PyObject* exc_type = PyErr_Occurred();
           if (exc_type) {
             if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           }
           break;
         }
@@ -6149,42 +6245,50 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
       __pyx_t_2 = 0;
       __Pyx_INCREF(__pyx_t_6);
       __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_6);
-      __pyx_t_2 = PyNumber_Add(__pyx_t_6, __pyx_int_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_2 = PyNumber_Add(__pyx_t_6, __pyx_int_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_2);
       __Pyx_DECREF(__pyx_t_6);
       __pyx_t_6 = __pyx_t_2;
       __pyx_t_2 = 0;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":697
+      /* "skbio/alignment/_ssw_wrapper.pyx":682
  *         else:
  *             for i, char in enumerate(sequence):
  *                 seq[i] = np_nt_table[ord(char)]             # <<<<<<<<<<<<<<
  *         return seq
  * 
  */
-      __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np_nt_table); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np_nt_table); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 682; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_2);
-      __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 682; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_5);
       __Pyx_INCREF(__pyx_v_char);
-      PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_char);
       __Pyx_GIVEREF(__pyx_v_char);
-      __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ord, __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_char);
+      __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ord, __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 682; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
       __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-      __pyx_t_5 = PyObject_GetItem(__pyx_t_2, __pyx_t_4); if (unlikely(__pyx_t_5 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+      __pyx_t_5 = PyObject_GetItem(__pyx_t_2, __pyx_t_4); if (unlikely(__pyx_t_5 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 682; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
       __Pyx_GOTREF(__pyx_t_5);
       __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_seq), __pyx_v_i, __pyx_t_5) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_seq), __pyx_v_i, __pyx_t_5) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 682; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":681
+ *                 seq[i] = np_aa_table[ord(char)]
+ *         else:
+ *             for i, char in enumerate(sequence):             # <<<<<<<<<<<<<<
+ *                 seq[i] = np_nt_table[ord(char)]
+ *         return seq
+ */
     }
     __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
     __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
   }
   __pyx_L3:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":698
+  /* "skbio/alignment/_ssw_wrapper.pyx":683
  *             for i, char in enumerate(sequence):
  *                 seq[i] = np_nt_table[ord(char)]
  *         return seq             # <<<<<<<<<<<<<<
@@ -6196,7 +6300,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   __pyx_r = ((PyArrayObject *)__pyx_v_seq);
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":687
+  /* "skbio/alignment/_ssw_wrapper.pyx":672
  *         return bit_flag
  * 
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] _seq_converter(             # <<<<<<<<<<<<<<
@@ -6229,7 +6333,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":700
+/* "skbio/alignment/_ssw_wrapper.pyx":685
  *         return seq
  * 
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \             # <<<<<<<<<<<<<<
@@ -6253,14 +6357,13 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   PyObject *__pyx_t_7 = NULL;
   int __pyx_t_8;
   int __pyx_t_9;
-  int __pyx_t_10;
-  PyObject *__pyx_t_11 = NULL;
+  PyObject *__pyx_t_10 = NULL;
   int __pyx_lineno = 0;
   const char *__pyx_filename = NULL;
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("_build_match_matrix", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":702
+  /* "skbio/alignment/_ssw_wrapper.pyx":687
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \
  *             _build_match_matrix(self, match_score, mismatch_score):
  *         sequence_order = "ACGTN"             # <<<<<<<<<<<<<<
@@ -6270,55 +6373,59 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   __Pyx_INCREF(__pyx_n_s_ACGTN);
   __pyx_v_sequence_order = __pyx_n_s_ACGTN;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":703
+  /* "skbio/alignment/_ssw_wrapper.pyx":688
  *             _build_match_matrix(self, match_score, mismatch_score):
  *         sequence_order = "ACGTN"
  *         dict2d = {}             # <<<<<<<<<<<<<<
  *         for row in sequence_order:
  *             dict2d[row] = {}
  */
-  __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 703; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 688; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_v_dict2d = ((PyObject*)__pyx_t_1);
   __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":704
+  /* "skbio/alignment/_ssw_wrapper.pyx":689
  *         sequence_order = "ACGTN"
  *         dict2d = {}
  *         for row in sequence_order:             # <<<<<<<<<<<<<<
  *             dict2d[row] = {}
  *             for column in sequence_order:
  */
-  if (PyList_CheckExact(__pyx_v_sequence_order) || PyTuple_CheckExact(__pyx_v_sequence_order)) {
+  if (likely(PyList_CheckExact(__pyx_v_sequence_order)) || PyTuple_CheckExact(__pyx_v_sequence_order)) {
     __pyx_t_1 = __pyx_v_sequence_order; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
     __pyx_t_3 = NULL;
   } else {
-    __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
-    __pyx_t_3 = Py_TYPE(__pyx_t_1)->tp_iternext;
+    __pyx_t_3 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   for (;;) {
-    if (!__pyx_t_3 && PyList_CheckExact(__pyx_t_1)) {
-      if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_1)) break;
-      #if CYTHON_COMPILING_IN_CPYTHON
-      __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #else
-      __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #endif
-    } else if (!__pyx_t_3 && PyTuple_CheckExact(__pyx_t_1)) {
-      if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
-      #if CYTHON_COMPILING_IN_CPYTHON
-      __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #else
-      __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #endif
+    if (likely(!__pyx_t_3)) {
+      if (likely(PyList_CheckExact(__pyx_t_1))) {
+        if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_1)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_4);
+        #endif
+      } else {
+        if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_4);
+        #endif
+      }
     } else {
       __pyx_t_4 = __pyx_t_3(__pyx_t_1);
       if (unlikely(!__pyx_t_4)) {
         PyObject* exc_type = PyErr_Occurred();
         if (exc_type) {
           if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         }
         break;
       }
@@ -6327,55 +6434,59 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
     __Pyx_XDECREF_SET(__pyx_v_row, __pyx_t_4);
     __pyx_t_4 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":705
+    /* "skbio/alignment/_ssw_wrapper.pyx":690
  *         dict2d = {}
  *         for row in sequence_order:
  *             dict2d[row] = {}             # <<<<<<<<<<<<<<
  *             for column in sequence_order:
  *                 if column == 'N' or row == 'N':
  */
-    __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 705; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_4);
-    if (unlikely(PyDict_SetItem(__pyx_v_dict2d, __pyx_v_row, __pyx_t_4) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 705; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (unlikely(PyDict_SetItem(__pyx_v_dict2d, __pyx_v_row, __pyx_t_4) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":706
+    /* "skbio/alignment/_ssw_wrapper.pyx":691
  *         for row in sequence_order:
  *             dict2d[row] = {}
  *             for column in sequence_order:             # <<<<<<<<<<<<<<
  *                 if column == 'N' or row == 'N':
  *                     dict2d[row][column] = 0
  */
-    if (PyList_CheckExact(__pyx_v_sequence_order) || PyTuple_CheckExact(__pyx_v_sequence_order)) {
+    if (likely(PyList_CheckExact(__pyx_v_sequence_order)) || PyTuple_CheckExact(__pyx_v_sequence_order)) {
       __pyx_t_4 = __pyx_v_sequence_order; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
       __pyx_t_6 = NULL;
     } else {
-      __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext;
+      __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
     for (;;) {
-      if (!__pyx_t_6 && PyList_CheckExact(__pyx_t_4)) {
-        if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
-        #if CYTHON_COMPILING_IN_CPYTHON
-        __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        #else
-        __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        #endif
-      } else if (!__pyx_t_6 && PyTuple_CheckExact(__pyx_t_4)) {
-        if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
-        #if CYTHON_COMPILING_IN_CPYTHON
-        __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        #else
-        __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        #endif
+      if (likely(!__pyx_t_6)) {
+        if (likely(PyList_CheckExact(__pyx_t_4))) {
+          if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
+          #if CYTHON_COMPILING_IN_CPYTHON
+          __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          #else
+          __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __Pyx_GOTREF(__pyx_t_7);
+          #endif
+        } else {
+          if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
+          #if CYTHON_COMPILING_IN_CPYTHON
+          __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          #else
+          __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __Pyx_GOTREF(__pyx_t_7);
+          #endif
+        }
       } else {
         __pyx_t_7 = __pyx_t_6(__pyx_t_4);
         if (unlikely(!__pyx_t_7)) {
           PyObject* exc_type = PyErr_Occurred();
           if (exc_type) {
             if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           }
           break;
         }
@@ -6384,61 +6495,63 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
       __Pyx_XDECREF_SET(__pyx_v_column, __pyx_t_7);
       __pyx_t_7 = 0;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":707
+      /* "skbio/alignment/_ssw_wrapper.pyx":692
  *             dict2d[row] = {}
  *             for column in sequence_order:
  *                 if column == 'N' or row == 'N':             # <<<<<<<<<<<<<<
  *                     dict2d[row][column] = 0
  *                 else:
  */
-      __pyx_t_8 = (__Pyx_PyString_Equals(__pyx_v_column, __pyx_n_s_N, Py_EQ)); if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      if (!__pyx_t_8) {
-        __pyx_t_9 = (__Pyx_PyString_Equals(__pyx_v_row, __pyx_n_s_N, Py_EQ)); if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        __pyx_t_10 = __pyx_t_9;
+      __pyx_t_9 = (__Pyx_PyString_Equals(__pyx_v_column, __pyx_n_s_N, Py_EQ)); if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      if (!__pyx_t_9) {
       } else {
-        __pyx_t_10 = __pyx_t_8;
+        __pyx_t_8 = __pyx_t_9;
+        goto __pyx_L8_bool_binop_done;
       }
-      if (__pyx_t_10) {
+      __pyx_t_9 = (__Pyx_PyString_Equals(__pyx_v_row, __pyx_n_s_N, Py_EQ)); if (unlikely(__pyx_t_9 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_8 = __pyx_t_9;
+      __pyx_L8_bool_binop_done:;
+      if (__pyx_t_8) {
 
-        /* "skbio/alignment/_ssw_wrapper.pyx":708
+        /* "skbio/alignment/_ssw_wrapper.pyx":693
  *             for column in sequence_order:
  *                 if column == 'N' or row == 'N':
  *                     dict2d[row][column] = 0             # <<<<<<<<<<<<<<
  *                 else:
  *                     dict2d[row][column] = match_score if row == column \
  */
-        __pyx_t_7 = __Pyx_PyDict_GetItem(__pyx_v_dict2d, __pyx_v_row); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+        __pyx_t_7 = __Pyx_PyDict_GetItem(__pyx_v_dict2d, __pyx_v_row); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
         __Pyx_GOTREF(__pyx_t_7);
-        if (unlikely(PyObject_SetItem(__pyx_t_7, __pyx_v_column, __pyx_int_0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        if (unlikely(PyObject_SetItem(__pyx_t_7, __pyx_v_column, __pyx_int_0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
         goto __pyx_L7;
       }
       /*else*/ {
 
-        /* "skbio/alignment/_ssw_wrapper.pyx":711
+        /* "skbio/alignment/_ssw_wrapper.pyx":696
  *                 else:
  *                     dict2d[row][column] = match_score if row == column \
  *                         else mismatch_score             # <<<<<<<<<<<<<<
  *         return self._convert_dict2d_to_matrix(dict2d)
  * 
  */
-        __pyx_t_11 = PyObject_RichCompare(__pyx_v_row, __pyx_v_column, Py_EQ); __Pyx_XGOTREF(__pyx_t_11); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_10 = PyObject_RichCompare(__pyx_v_row, __pyx_v_column, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 695; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
 
-        /* "skbio/alignment/_ssw_wrapper.pyx":710
+        /* "skbio/alignment/_ssw_wrapper.pyx":695
  *                     dict2d[row][column] = 0
  *                 else:
  *                     dict2d[row][column] = match_score if row == column \             # <<<<<<<<<<<<<<
  *                         else mismatch_score
  *         return self._convert_dict2d_to_matrix(dict2d)
  */
-        __pyx_t_10 = __Pyx_PyObject_IsTrue(__pyx_t_11); if (unlikely(__pyx_t_10 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
-        if (__pyx_t_10) {
+        __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 695; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+        if (__pyx_t_8) {
           __Pyx_INCREF(__pyx_v_match_score);
           __pyx_t_7 = __pyx_v_match_score;
         } else {
 
-          /* "skbio/alignment/_ssw_wrapper.pyx":711
+          /* "skbio/alignment/_ssw_wrapper.pyx":696
  *                 else:
  *                     dict2d[row][column] = match_score if row == column \
  *                         else mismatch_score             # <<<<<<<<<<<<<<
@@ -6449,26 +6562,42 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
           __pyx_t_7 = __pyx_v_mismatch_score;
         }
 
-        /* "skbio/alignment/_ssw_wrapper.pyx":710
+        /* "skbio/alignment/_ssw_wrapper.pyx":695
  *                     dict2d[row][column] = 0
  *                 else:
  *                     dict2d[row][column] = match_score if row == column \             # <<<<<<<<<<<<<<
  *                         else mismatch_score
  *         return self._convert_dict2d_to_matrix(dict2d)
  */
-        __pyx_t_11 = __Pyx_PyDict_GetItem(__pyx_v_dict2d, __pyx_v_row); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
-        __Pyx_GOTREF(__pyx_t_11);
-        if (unlikely(PyObject_SetItem(__pyx_t_11, __pyx_v_column, __pyx_t_7) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+        __pyx_t_10 = __Pyx_PyDict_GetItem(__pyx_v_dict2d, __pyx_v_row); if (unlikely(__pyx_t_10 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 695; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+        __Pyx_GOTREF(__pyx_t_10);
+        if (unlikely(PyObject_SetItem(__pyx_t_10, __pyx_v_column, __pyx_t_7) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 695; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
         __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
       }
       __pyx_L7:;
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":691
+ *         for row in sequence_order:
+ *             dict2d[row] = {}
+ *             for column in sequence_order:             # <<<<<<<<<<<<<<
+ *                 if column == 'N' or row == 'N':
+ *                     dict2d[row][column] = 0
+ */
     }
     __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":689
+ *         sequence_order = "ACGTN"
+ *         dict2d = {}
+ *         for row in sequence_order:             # <<<<<<<<<<<<<<
+ *             dict2d[row] = {}
+ *             for column in sequence_order:
+ */
   }
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":712
+  /* "skbio/alignment/_ssw_wrapper.pyx":697
  *                     dict2d[row][column] = match_score if row == column \
  *                         else mismatch_score
  *         return self._convert_dict2d_to_matrix(dict2d)             # <<<<<<<<<<<<<<
@@ -6476,13 +6605,13 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \
  */
   __Pyx_XDECREF(((PyObject *)__pyx_r));
-  __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_convert_dict2d_to_matrix(__pyx_v_self, __pyx_v_dict2d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 712; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = ((PyObject *)((struct __pyx_vtabstruct_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_v_self->__pyx_vtab)->_convert_dict2d_to_matrix(__pyx_v_self, __pyx_v_dict2d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = ((PyArrayObject *)__pyx_t_1);
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":700
+  /* "skbio/alignment/_ssw_wrapper.pyx":685
  *         return seq
  * 
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \             # <<<<<<<<<<<<<<
@@ -6495,7 +6624,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   __Pyx_XDECREF(__pyx_t_1);
   __Pyx_XDECREF(__pyx_t_4);
   __Pyx_XDECREF(__pyx_t_7);
-  __Pyx_XDECREF(__pyx_t_11);
+  __Pyx_XDECREF(__pyx_t_10);
   __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.StripedSmithWaterman._build_match_matrix", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __pyx_r = 0;
   __pyx_L0:;
@@ -6508,7 +6637,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   return __pyx_r;
 }
 
-/* "skbio/alignment/_ssw_wrapper.pyx":714
+/* "skbio/alignment/_ssw_wrapper.pyx":699
  *         return self._convert_dict2d_to_matrix(dict2d)
  * 
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \             # <<<<<<<<<<<<<<
@@ -6550,17 +6679,17 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   __pyx_pybuffernd_py_list_matrix.data = NULL;
   __pyx_pybuffernd_py_list_matrix.rcbuffer = &__pyx_pybuffer_py_list_matrix;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":716
+  /* "skbio/alignment/_ssw_wrapper.pyx":701
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \
  *             _convert_dict2d_to_matrix(self, dict2d):
  *         if self.is_protein:             # <<<<<<<<<<<<<<
  *             sequence_order = "ARNDCQEGHILKMFPSTWYVBZX*"
  *         else:
  */
-  __pyx_t_1 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->is_protein)); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 716; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->is_protein)); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 701; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   if (__pyx_t_1) {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":717
+    /* "skbio/alignment/_ssw_wrapper.pyx":702
  *             _convert_dict2d_to_matrix(self, dict2d):
  *         if self.is_protein:
  *             sequence_order = "ARNDCQEGHILKMFPSTWYVBZX*"             # <<<<<<<<<<<<<<
@@ -6573,7 +6702,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   }
   /*else*/ {
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":719
+    /* "skbio/alignment/_ssw_wrapper.pyx":704
  *             sequence_order = "ARNDCQEGHILKMFPSTWYVBZX*"
  *         else:
  *             sequence_order = "ACGTN"             # <<<<<<<<<<<<<<
@@ -6585,7 +6714,7 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   }
   __pyx_L3:;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":720
+  /* "skbio/alignment/_ssw_wrapper.pyx":705
  *         else:
  *             sequence_order = "ACGTN"
  *         cdef int i = 0             # <<<<<<<<<<<<<<
@@ -6594,59 +6723,59 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
  */
   __pyx_v_i = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":721
+  /* "skbio/alignment/_ssw_wrapper.pyx":706
  *             sequence_order = "ACGTN"
  *         cdef int i = 0
  *         length = len(sequence_order)             # <<<<<<<<<<<<<<
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] py_list_matrix = \
  *             np.empty(length*length, dtype=np.int8)
  */
-  __pyx_t_2 = PyObject_Length(__pyx_v_sequence_order); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 721; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_t_3 = PyInt_FromSsize_t(__pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 721; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = PyObject_Length(__pyx_v_sequence_order); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = PyInt_FromSsize_t(__pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
   __pyx_v_length = __pyx_t_3;
   __pyx_t_3 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":723
+  /* "skbio/alignment/_ssw_wrapper.pyx":708
  *         length = len(sequence_order)
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] py_list_matrix = \
  *             np.empty(length*length, dtype=np.int8)             # <<<<<<<<<<<<<<
  *         for row in sequence_order:
  *             for column in sequence_order:
  */
-  __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_empty); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_empty); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_t_3 = PyNumber_Multiply(__pyx_v_length, __pyx_v_length); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = PyNumber_Multiply(__pyx_v_length, __pyx_v_length); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_5);
-  PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
   __Pyx_GIVEREF(__pyx_t_3);
+  PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
   __pyx_t_3 = 0;
-  __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_6);
-  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_int8); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_int8); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_7);
   __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_7) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_7) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-  __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_7);
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
   __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (!(likely(((__pyx_t_7) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_7, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!(likely(((__pyx_t_7) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_7, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_t_8 = ((PyArrayObject *)__pyx_t_7);
   {
     __Pyx_BufFmt_StackElem __pyx_stack[1];
     if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int8_t, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
       __pyx_v_py_list_matrix = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer.buf = NULL;
-      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 722; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     } else {__pyx_pybuffernd_py_list_matrix.diminfo[0].strides = __pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_py_list_matrix.diminfo[0].shape = __pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer.shape[0];
     }
   }
@@ -6654,43 +6783,47 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   __pyx_v_py_list_matrix = ((PyArrayObject *)__pyx_t_7);
   __pyx_t_7 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":724
+  /* "skbio/alignment/_ssw_wrapper.pyx":709
  *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] py_list_matrix = \
  *             np.empty(length*length, dtype=np.int8)
  *         for row in sequence_order:             # <<<<<<<<<<<<<<
  *             for column in sequence_order:
  *                 py_list_matrix[i] = dict2d[row][column]
  */
-  if (PyList_CheckExact(__pyx_v_sequence_order) || PyTuple_CheckExact(__pyx_v_sequence_order)) {
+  if (likely(PyList_CheckExact(__pyx_v_sequence_order)) || PyTuple_CheckExact(__pyx_v_sequence_order)) {
     __pyx_t_7 = __pyx_v_sequence_order; __Pyx_INCREF(__pyx_t_7); __pyx_t_2 = 0;
     __pyx_t_9 = NULL;
   } else {
-    __pyx_t_2 = -1; __pyx_t_7 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 724; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = -1; __pyx_t_7 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_7);
-    __pyx_t_9 = Py_TYPE(__pyx_t_7)->tp_iternext;
+    __pyx_t_9 = Py_TYPE(__pyx_t_7)->tp_iternext; if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   for (;;) {
-    if (!__pyx_t_9 && PyList_CheckExact(__pyx_t_7)) {
-      if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_7)) break;
-      #if CYTHON_COMPILING_IN_CPYTHON
-      __pyx_t_3 = PyList_GET_ITEM(__pyx_t_7, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 724; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #else
-      __pyx_t_3 = PySequence_ITEM(__pyx_t_7, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 724; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #endif
-    } else if (!__pyx_t_9 && PyTuple_CheckExact(__pyx_t_7)) {
-      if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_7)) break;
-      #if CYTHON_COMPILING_IN_CPYTHON
-      __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_7, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 724; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #else
-      __pyx_t_3 = PySequence_ITEM(__pyx_t_7, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 724; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      #endif
+    if (likely(!__pyx_t_9)) {
+      if (likely(PyList_CheckExact(__pyx_t_7))) {
+        if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_7)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_3 = PyList_GET_ITEM(__pyx_t_7, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_3 = PySequence_ITEM(__pyx_t_7, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_3);
+        #endif
+      } else {
+        if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_7)) break;
+        #if CYTHON_COMPILING_IN_CPYTHON
+        __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_7, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        #else
+        __pyx_t_3 = PySequence_ITEM(__pyx_t_7, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __Pyx_GOTREF(__pyx_t_3);
+        #endif
+      }
     } else {
       __pyx_t_3 = __pyx_t_9(__pyx_t_7);
       if (unlikely(!__pyx_t_3)) {
         PyObject* exc_type = PyErr_Occurred();
         if (exc_type) {
           if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 724; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         }
         break;
       }
@@ -6699,43 +6832,47 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
     __Pyx_XDECREF_SET(__pyx_v_row, __pyx_t_3);
     __pyx_t_3 = 0;
 
-    /* "skbio/alignment/_ssw_wrapper.pyx":725
+    /* "skbio/alignment/_ssw_wrapper.pyx":710
  *             np.empty(length*length, dtype=np.int8)
  *         for row in sequence_order:
  *             for column in sequence_order:             # <<<<<<<<<<<<<<
  *                 py_list_matrix[i] = dict2d[row][column]
  *                 i += 1
  */
-    if (PyList_CheckExact(__pyx_v_sequence_order) || PyTuple_CheckExact(__pyx_v_sequence_order)) {
+    if (likely(PyList_CheckExact(__pyx_v_sequence_order)) || PyTuple_CheckExact(__pyx_v_sequence_order)) {
       __pyx_t_3 = __pyx_v_sequence_order; __Pyx_INCREF(__pyx_t_3); __pyx_t_10 = 0;
       __pyx_t_11 = NULL;
     } else {
-      __pyx_t_10 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_10 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_sequence_order); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_11 = Py_TYPE(__pyx_t_3)->tp_iternext;
+      __pyx_t_11 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
     for (;;) {
-      if (!__pyx_t_11 && PyList_CheckExact(__pyx_t_3)) {
-        if (__pyx_t_10 >= PyList_GET_SIZE(__pyx_t_3)) break;
-        #if CYTHON_COMPILING_IN_CPYTHON
-        __pyx_t_5 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_10); __Pyx_INCREF(__pyx_t_5); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        #else
-        __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        #endif
-      } else if (!__pyx_t_11 && PyTuple_CheckExact(__pyx_t_3)) {
-        if (__pyx_t_10 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
-        #if CYTHON_COMPILING_IN_CPYTHON
-        __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_10); __Pyx_INCREF(__pyx_t_5); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        #else
-        __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-        #endif
+      if (likely(!__pyx_t_11)) {
+        if (likely(PyList_CheckExact(__pyx_t_3))) {
+          if (__pyx_t_10 >= PyList_GET_SIZE(__pyx_t_3)) break;
+          #if CYTHON_COMPILING_IN_CPYTHON
+          __pyx_t_5 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_10); __Pyx_INCREF(__pyx_t_5); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          #else
+          __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __Pyx_GOTREF(__pyx_t_5);
+          #endif
+        } else {
+          if (__pyx_t_10 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
+          #if CYTHON_COMPILING_IN_CPYTHON
+          __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_10); __Pyx_INCREF(__pyx_t_5); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          #else
+          __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+          __Pyx_GOTREF(__pyx_t_5);
+          #endif
+        }
       } else {
         __pyx_t_5 = __pyx_t_11(__pyx_t_3);
         if (unlikely(!__pyx_t_5)) {
           PyObject* exc_type = PyErr_Occurred();
           if (exc_type) {
             if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+            else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
           }
           break;
         }
@@ -6744,19 +6881,19 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
       __Pyx_XDECREF_SET(__pyx_v_column, __pyx_t_5);
       __pyx_t_5 = 0;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":726
+      /* "skbio/alignment/_ssw_wrapper.pyx":711
  *         for row in sequence_order:
  *             for column in sequence_order:
  *                 py_list_matrix[i] = dict2d[row][column]             # <<<<<<<<<<<<<<
  *                 i += 1
  *         return py_list_matrix
  */
-      __pyx_t_5 = PyObject_GetItem(__pyx_v_dict2d, __pyx_v_row); if (unlikely(__pyx_t_5 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 726; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+      __pyx_t_5 = PyObject_GetItem(__pyx_v_dict2d, __pyx_v_row); if (unlikely(__pyx_t_5 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
       __Pyx_GOTREF(__pyx_t_5);
-      __pyx_t_4 = PyObject_GetItem(__pyx_t_5, __pyx_v_column); if (unlikely(__pyx_t_4 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 726; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+      __pyx_t_4 = PyObject_GetItem(__pyx_t_5, __pyx_v_column); if (unlikely(__pyx_t_4 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
       __Pyx_GOTREF(__pyx_t_4);
       __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-      __pyx_t_12 = __Pyx_PyInt_As_npy_int8(__pyx_t_4); if (unlikely((__pyx_t_12 == (npy_int8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 726; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_12 = __Pyx_PyInt_As_npy_int8(__pyx_t_4); if (unlikely((__pyx_t_12 == (npy_int8)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       __pyx_t_13 = __pyx_v_i;
       __pyx_t_14 = -1;
@@ -6766,36 +6903,49 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
       } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_py_list_matrix.diminfo[0].shape)) __pyx_t_14 = 0;
       if (unlikely(__pyx_t_14 != -1)) {
         __Pyx_RaiseBufferIndexError(__pyx_t_14);
-        {__pyx_filename = __pyx_f[0]; __pyx_lineno = 726; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[0]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       }
       *__Pyx_BufPtrCContig1d(__pyx_t_5numpy_int8_t *, __pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_py_list_matrix.diminfo[0].strides) = __pyx_t_12;
 
-      /* "skbio/alignment/_ssw_wrapper.pyx":727
+      /* "skbio/alignment/_ssw_wrapper.pyx":712
  *             for column in sequence_order:
  *                 py_list_matrix[i] = dict2d[row][column]
  *                 i += 1             # <<<<<<<<<<<<<<
  *         return py_list_matrix
- * 
  */
       __pyx_v_i = (__pyx_v_i + 1);
+
+      /* "skbio/alignment/_ssw_wrapper.pyx":710
+ *             np.empty(length*length, dtype=np.int8)
+ *         for row in sequence_order:
+ *             for column in sequence_order:             # <<<<<<<<<<<<<<
+ *                 py_list_matrix[i] = dict2d[row][column]
+ *                 i += 1
+ */
     }
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+    /* "skbio/alignment/_ssw_wrapper.pyx":709
+ *         cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] py_list_matrix = \
+ *             np.empty(length*length, dtype=np.int8)
+ *         for row in sequence_order:             # <<<<<<<<<<<<<<
+ *             for column in sequence_order:
+ *                 py_list_matrix[i] = dict2d[row][column]
+ */
   }
   __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":728
+  /* "skbio/alignment/_ssw_wrapper.pyx":713
  *                 py_list_matrix[i] = dict2d[row][column]
  *                 i += 1
  *         return py_list_matrix             # <<<<<<<<<<<<<<
- * 
- * 
  */
   __Pyx_XDECREF(((PyObject *)__pyx_r));
   __Pyx_INCREF(((PyObject *)__pyx_v_py_list_matrix));
   __pyx_r = ((PyArrayObject *)__pyx_v_py_list_matrix);
   goto __pyx_L0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":714
+  /* "skbio/alignment/_ssw_wrapper.pyx":699
  *         return self._convert_dict2d_to_matrix(dict2d)
  * 
  *     cdef cnp.ndarray[cnp.int8_t, ndim = 1, mode = "c"] \             # <<<<<<<<<<<<<<
@@ -6811,557 +6961,26 @@ static PyArrayObject *__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWat
   __Pyx_XDECREF(__pyx_t_6);
   __Pyx_XDECREF(__pyx_t_7);
   { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
-    __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
-    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer);
-  __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
-  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.StripedSmithWaterman._convert_dict2d_to_matrix", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  goto __pyx_L2;
-  __pyx_L0:;
-  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer);
-  __pyx_L2:;
-  __Pyx_XDECREF(__pyx_v_sequence_order);
-  __Pyx_XDECREF(__pyx_v_length);
-  __Pyx_XDECREF((PyObject *)__pyx_v_py_list_matrix);
-  __Pyx_XDECREF(__pyx_v_row);
-  __Pyx_XDECREF(__pyx_v_column);
-  __Pyx_XGIVEREF((PyObject *)__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "skbio/alignment/_ssw_wrapper.pyx":731
- * 
- * 
- * def local_pairwise_align_ssw(sequence1, sequence2,             # <<<<<<<<<<<<<<
- *                              **kwargs):
- *     """Align query and target sequences with Striped Smith-Waterman.
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_1local_pairwise_align_ssw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_5skbio_9alignment_12_ssw_wrapper_local_pairwise_align_ssw[] = "Align query and target sequences with Striped Smith-Waterman.\n\n    Parameters\n    ----------\n    sequence1 : str or BiologicalSequence\n        The first unaligned sequence\n    sequence2 : str or BiologicalSequence\n        The second unaligned sequence\n\n    Returns\n    -------\n    ``skbio.alignment.Alignment``\n        The resulting alignment as an Alignment object\n\n    Notes\n    -----\n     [...]
-static PyMethodDef __pyx_mdef_5skbio_9alignment_12_ssw_wrapper_1local_pairwise_align_ssw = {__Pyx_NAMESTR("local_pairwise_align_ssw"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_1local_pairwise_align_ssw, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_local_pairwise_align_ssw)};
-static PyObject *__pyx_pw_5skbio_9alignment_12_ssw_wrapper_1local_pairwise_align_ssw(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v_sequence1 = 0;
-  PyObject *__pyx_v_sequence2 = 0;
-  PyObject *__pyx_v_kwargs = 0;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("local_pairwise_align_ssw (wrapper)", 0);
-  __pyx_v_kwargs = PyDict_New(); if (unlikely(!__pyx_v_kwargs)) return NULL;
-  __Pyx_GOTREF(__pyx_v_kwargs);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_sequence1,&__pyx_n_s_sequence2,0};
-    PyObject* values[2] = {0,0};
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_sequence1)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        case  1:
-        if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_sequence2)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("local_pairwise_align_ssw", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, __pyx_v_kwargs, values, pos_args, "local_pairwise_align_ssw") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
-      }
-    } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
-      goto __pyx_L5_argtuple_error;
-    } else {
-      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-    }
-    __pyx_v_sequence1 = values[0];
-    __pyx_v_sequence2 = values[1];
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("local_pairwise_align_ssw", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
-  __pyx_L3_error:;
-  __Pyx_DECREF(__pyx_v_kwargs); __pyx_v_kwargs = 0;
-  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.local_pairwise_align_ssw", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return NULL;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_pf_5skbio_9alignment_12_ssw_wrapper_local_pairwise_align_ssw(__pyx_self, __pyx_v_sequence1, __pyx_v_sequence2, __pyx_v_kwargs);
-
-  /* function exit code */
-  __Pyx_XDECREF(__pyx_v_kwargs);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_5skbio_9alignment_12_ssw_wrapper_local_pairwise_align_ssw(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_sequence1, PyObject *__pyx_v_sequence2, PyObject *__pyx_v_kwargs) {
-  struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *__pyx_v_query = NULL;
-  PyObject *__pyx_v_alignment = NULL;
-  PyObject *__pyx_v_start_end = NULL;
-  PyObject *__pyx_v_seqs = NULL;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_t_2;
-  int __pyx_t_3;
-  PyObject *__pyx_t_4 = NULL;
-  PyObject *__pyx_t_5 = NULL;
-  PyObject *__pyx_t_6 = NULL;
-  PyObject *__pyx_t_7 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("local_pairwise_align_ssw", 0);
-
-  /* "skbio/alignment/_ssw_wrapper.pyx":773
- *     # We need the sequences for `Alignment` to make sense, so don't let the
- *     # user suppress them.
- *     kwargs['suppress_sequences'] = False             # <<<<<<<<<<<<<<
- *     kwargs['zero_index'] = True
- * 
- */
-  if (unlikely(PyDict_SetItem(__pyx_v_kwargs, __pyx_n_s_suppress_sequences, Py_False) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 773; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-
-  /* "skbio/alignment/_ssw_wrapper.pyx":774
- *     # user suppress them.
- *     kwargs['suppress_sequences'] = False
- *     kwargs['zero_index'] = True             # <<<<<<<<<<<<<<
- * 
- *     if isinstance(sequence1, ProteinSequence):
- */
-  if (unlikely(PyDict_SetItem(__pyx_v_kwargs, __pyx_n_s_zero_index, Py_True) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 774; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-
-  /* "skbio/alignment/_ssw_wrapper.pyx":776
- *     kwargs['zero_index'] = True
- * 
- *     if isinstance(sequence1, ProteinSequence):             # <<<<<<<<<<<<<<
- *         kwargs['protein'] = True
- * 
- */
-  __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_ProteinSequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = PyObject_IsInstance(__pyx_v_sequence1, __pyx_t_1); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_3 = (__pyx_t_2 != 0);
-  if (__pyx_t_3) {
-
-    /* "skbio/alignment/_ssw_wrapper.pyx":777
- * 
- *     if isinstance(sequence1, ProteinSequence):
- *         kwargs['protein'] = True             # <<<<<<<<<<<<<<
- * 
- *     query = StripedSmithWaterman(str(sequence1), **kwargs)
- */
-    if (unlikely(PyDict_SetItem(__pyx_v_kwargs, __pyx_n_s_protein, Py_True) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    goto __pyx_L3;
-  }
-  __pyx_L3:;
-
-  /* "skbio/alignment/_ssw_wrapper.pyx":779
- *         kwargs['protein'] = True
- * 
- *     query = StripedSmithWaterman(str(sequence1), **kwargs)             # <<<<<<<<<<<<<<
- *     alignment = query(str(sequence2))
- * 
- */
-  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 779; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_INCREF(__pyx_v_sequence1);
-  PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_sequence1);
-  __Pyx_GIVEREF(__pyx_v_sequence1);
-  __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), __pyx_t_1, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 779; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 779; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4);
-  __Pyx_GIVEREF(__pyx_t_4);
-  __pyx_t_4 = 0;
-  __pyx_t_4 = __pyx_v_kwargs;
-  __Pyx_INCREF(__pyx_t_4);
-  __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman)), __pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 779; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_5);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __pyx_v_query = ((struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *)__pyx_t_5);
-  __pyx_t_5 = 0;
-
-  /* "skbio/alignment/_ssw_wrapper.pyx":780
- * 
- *     query = StripedSmithWaterman(str(sequence1), **kwargs)
- *     alignment = query(str(sequence2))             # <<<<<<<<<<<<<<
- * 
- *     # If there is no cigar, then it has failed a filter. Return None.
- */
-  __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 780; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_5);
-  __Pyx_INCREF(__pyx_v_sequence2);
-  PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_sequence2);
-  __Pyx_GIVEREF(__pyx_v_sequence2);
-  __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 780; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 780; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_5);
-  PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
-  __Pyx_GIVEREF(__pyx_t_4);
-  __pyx_t_4 = 0;
-  __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_v_query), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 780; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  __pyx_v_alignment = __pyx_t_4;
-  __pyx_t_4 = 0;
-
-  /* "skbio/alignment/_ssw_wrapper.pyx":783
- * 
- *     # If there is no cigar, then it has failed a filter. Return None.
- *     if not alignment.cigar:             # <<<<<<<<<<<<<<
- *         return None
- * 
- */
-  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_cigar); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 783; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_4);
-  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 783; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __pyx_t_2 = ((!__pyx_t_3) != 0);
-  if (__pyx_t_2) {
-
-    /* "skbio/alignment/_ssw_wrapper.pyx":784
- *     # If there is no cigar, then it has failed a filter. Return None.
- *     if not alignment.cigar:
- *         return None             # <<<<<<<<<<<<<<
- * 
- *     start_end = None
- */
-    __Pyx_XDECREF(__pyx_r);
-    __Pyx_INCREF(Py_None);
-    __pyx_r = Py_None;
-    goto __pyx_L0;
-  }
-
-  /* "skbio/alignment/_ssw_wrapper.pyx":786
- *         return None
- * 
- *     start_end = None             # <<<<<<<<<<<<<<
- *     if alignment.query_begin != -1:
- *         start_end = [
- */
-  __Pyx_INCREF(Py_None);
-  __pyx_v_start_end = Py_None;
-
-  /* "skbio/alignment/_ssw_wrapper.pyx":787
- * 
- *     start_end = None
- *     if alignment.query_begin != -1:             # <<<<<<<<<<<<<<
- *         start_end = [
- *             (alignment.query_begin, alignment.query_end),
- */
-  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_query_begin); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 787; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_4);
-  __pyx_t_5 = PyObject_RichCompare(__pyx_t_4, __pyx_int_neg_1, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 787; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 787; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  if (__pyx_t_2) {
-
-    /* "skbio/alignment/_ssw_wrapper.pyx":789
- *     if alignment.query_begin != -1:
- *         start_end = [
- *             (alignment.query_begin, alignment.query_end),             # <<<<<<<<<<<<<<
- *             (alignment.target_begin, alignment.target_end_optimal)
- *         ]
- */
-    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_query_begin); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 789; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_5);
-    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_query_end); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 789; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 789; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_1);
-    PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5);
-    __Pyx_GIVEREF(__pyx_t_5);
-    PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_4);
-    __Pyx_GIVEREF(__pyx_t_4);
-    __pyx_t_5 = 0;
-    __pyx_t_4 = 0;
-
-    /* "skbio/alignment/_ssw_wrapper.pyx":790
- *         start_end = [
- *             (alignment.query_begin, alignment.query_end),
- *             (alignment.target_begin, alignment.target_end_optimal)             # <<<<<<<<<<<<<<
- *         ]
- *     if kwargs.get('protein', False):
- */
-    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_target_begin); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_target_end_optimal); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_5);
-    __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_6);
-    PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4);
-    __Pyx_GIVEREF(__pyx_t_4);
-    PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_5);
-    __Pyx_GIVEREF(__pyx_t_5);
-    __pyx_t_4 = 0;
-    __pyx_t_5 = 0;
-
-    /* "skbio/alignment/_ssw_wrapper.pyx":788
- *     start_end = None
- *     if alignment.query_begin != -1:
- *         start_end = [             # <<<<<<<<<<<<<<
- *             (alignment.query_begin, alignment.query_end),
- *             (alignment.target_begin, alignment.target_end_optimal)
- */
-    __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 788; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_5);
-    PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_1);
-    __Pyx_GIVEREF(__pyx_t_1);
-    PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_6);
-    __Pyx_GIVEREF(__pyx_t_6);
-    __pyx_t_1 = 0;
-    __pyx_t_6 = 0;
-    __Pyx_DECREF_SET(__pyx_v_start_end, __pyx_t_5);
-    __pyx_t_5 = 0;
-    goto __pyx_L5;
-  }
-  __pyx_L5:;
-
-  /* "skbio/alignment/_ssw_wrapper.pyx":792
- *             (alignment.target_begin, alignment.target_end_optimal)
- *         ]
- *     if kwargs.get('protein', False):             # <<<<<<<<<<<<<<
- *         seqs = [
- *             ProteinSequence(alignment.aligned_query_sequence, id='query'),
- */
-  __pyx_t_5 = __Pyx_PyDict_GetItemDefault(__pyx_v_kwargs, __pyx_n_s_protein, Py_False); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_5);
-  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  if (__pyx_t_2) {
-
-    /* "skbio/alignment/_ssw_wrapper.pyx":794
- *     if kwargs.get('protein', False):
- *         seqs = [
- *             ProteinSequence(alignment.aligned_query_sequence, id='query'),             # <<<<<<<<<<<<<<
- *             ProteinSequence(alignment.aligned_target_sequence, id='target')
- *         ]
- */
-    __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_ProteinSequence); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_5);
-    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_aligned_query_sequence); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_6);
-    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_1);
-    PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6);
-    __Pyx_GIVEREF(__pyx_t_6);
-    __pyx_t_6 = 0;
-    __pyx_t_6 = PyDict_New(); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_6);
-    if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_id, __pyx_n_s_query) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_1, __pyx_t_6); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_4);
-    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-
-    /* "skbio/alignment/_ssw_wrapper.pyx":795
- *         seqs = [
- *             ProteinSequence(alignment.aligned_query_sequence, id='query'),
- *             ProteinSequence(alignment.aligned_target_sequence, id='target')             # <<<<<<<<<<<<<<
- *         ]
- *     else:
- */
-    __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_ProteinSequence); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_6);
-    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_aligned_target_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_1);
-    __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_5);
-    PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1);
-    __Pyx_GIVEREF(__pyx_t_1);
-    __pyx_t_1 = 0;
-    __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_1);
-    if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_id, __pyx_n_s_target) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_7);
-    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
-    /* "skbio/alignment/_ssw_wrapper.pyx":793
- *         ]
- *     if kwargs.get('protein', False):
- *         seqs = [             # <<<<<<<<<<<<<<
- *             ProteinSequence(alignment.aligned_query_sequence, id='query'),
- *             ProteinSequence(alignment.aligned_target_sequence, id='target')
- */
-    __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 793; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_1);
-    PyList_SET_ITEM(__pyx_t_1, 0, __pyx_t_4);
-    __Pyx_GIVEREF(__pyx_t_4);
-    PyList_SET_ITEM(__pyx_t_1, 1, __pyx_t_7);
-    __Pyx_GIVEREF(__pyx_t_7);
-    __pyx_t_4 = 0;
-    __pyx_t_7 = 0;
-    __pyx_v_seqs = ((PyObject*)__pyx_t_1);
-    __pyx_t_1 = 0;
-    goto __pyx_L6;
-  }
-  /*else*/ {
-
-    /* "skbio/alignment/_ssw_wrapper.pyx":799
- *     else:
- *         seqs = [
- *             NucleotideSequence(alignment.aligned_query_sequence, id='query'),             # <<<<<<<<<<<<<<
- *             NucleotideSequence(alignment.aligned_target_sequence, id='target')
- *         ]
- */
-    __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_NucleotideSequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_1);
-    __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_aligned_query_sequence); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_7);
-    __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_4);
-    PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_7);
-    __Pyx_GIVEREF(__pyx_t_7);
-    __pyx_t_7 = 0;
-    __pyx_t_7 = PyDict_New(); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_7);
-    if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_id, __pyx_n_s_query) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_4, __pyx_t_7); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_5);
-    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-
-    /* "skbio/alignment/_ssw_wrapper.pyx":800
- *         seqs = [
- *             NucleotideSequence(alignment.aligned_query_sequence, id='query'),
- *             NucleotideSequence(alignment.aligned_target_sequence, id='target')             # <<<<<<<<<<<<<<
- *         ]
- * 
- */
-    __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_NucleotideSequence); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_7);
-    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_aligned_target_sequence); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_1);
-    PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4);
-    __Pyx_GIVEREF(__pyx_t_4);
-    __pyx_t_4 = 0;
-    __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_4);
-    if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_id, __pyx_n_s_target) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_6);
-    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-
-    /* "skbio/alignment/_ssw_wrapper.pyx":798
- *         ]
- *     else:
- *         seqs = [             # <<<<<<<<<<<<<<
- *             NucleotideSequence(alignment.aligned_query_sequence, id='query'),
- *             NucleotideSequence(alignment.aligned_target_sequence, id='target')
- */
-    __pyx_t_4 = PyList_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_4);
-    PyList_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
-    __Pyx_GIVEREF(__pyx_t_5);
-    PyList_SET_ITEM(__pyx_t_4, 1, __pyx_t_6);
-    __Pyx_GIVEREF(__pyx_t_6);
-    __pyx_t_5 = 0;
-    __pyx_t_6 = 0;
-    __pyx_v_seqs = ((PyObject*)__pyx_t_4);
-    __pyx_t_4 = 0;
-  }
-  __pyx_L6:;
-
-  /* "skbio/alignment/_ssw_wrapper.pyx":803
- *         ]
- * 
- *     return Alignment(seqs, score=alignment.optimal_alignment_score,             # <<<<<<<<<<<<<<
- *                      start_end_positions=start_end)
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_Alignment); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_4);
-  __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_6);
-  __Pyx_INCREF(__pyx_v_seqs);
-  PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_seqs);
-  __Pyx_GIVEREF(__pyx_v_seqs);
-  __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_5);
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_alignment, __pyx_n_s_optimal_alignment_score); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_score, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
-  /* "skbio/alignment/_ssw_wrapper.pyx":804
- * 
- *     return Alignment(seqs, score=alignment.optimal_alignment_score,
- *                      start_end_positions=start_end)             # <<<<<<<<<<<<<<
- */
-  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_start_end_positions, __pyx_v_start_end) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-
-  /* "skbio/alignment/_ssw_wrapper.pyx":803
- *         ]
- * 
- *     return Alignment(seqs, score=alignment.optimal_alignment_score,             # <<<<<<<<<<<<<<
- *                      start_end_positions=start_end)
- */
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, __pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  __pyx_r = __pyx_t_1;
-  __pyx_t_1 = 0;
-  goto __pyx_L0;
-
-  /* "skbio/alignment/_ssw_wrapper.pyx":731
- * 
- * 
- * def local_pairwise_align_ssw(sequence1, sequence2,             # <<<<<<<<<<<<<<
- *                              **kwargs):
- *     """Align query and target sequences with Striped Smith-Waterman.
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_XDECREF(__pyx_t_6);
-  __Pyx_XDECREF(__pyx_t_7);
-  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.local_pairwise_align_ssw", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
+    __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
+    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer);
+  __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
+  __Pyx_AddTraceback("skbio.alignment._ssw_wrapper.StripedSmithWaterman._convert_dict2d_to_matrix", __pyx_clineno, __pyx_lineno, __pyx_filename);
+  __pyx_r = 0;
+  goto __pyx_L2;
   __pyx_L0:;
-  __Pyx_XDECREF((PyObject *)__pyx_v_query);
-  __Pyx_XDECREF(__pyx_v_alignment);
-  __Pyx_XDECREF(__pyx_v_start_end);
-  __Pyx_XDECREF(__pyx_v_seqs);
-  __Pyx_XGIVEREF(__pyx_r);
+  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_py_list_matrix.rcbuffer->pybuffer);
+  __pyx_L2:;
+  __Pyx_XDECREF(__pyx_v_sequence_order);
+  __Pyx_XDECREF(__pyx_v_length);
+  __Pyx_XDECREF((PyObject *)__pyx_v_py_list_matrix);
+  __Pyx_XDECREF(__pyx_v_row);
+  __Pyx_XDECREF(__pyx_v_column);
+  __Pyx_XGIVEREF((PyObject *)__pyx_r);
   __Pyx_RefNannyFinishContext();
   return __pyx_r;
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197
  *         # experimental exception made for __getbuffer__ and __releasebuffer__
  *         # -- the details of this may change.
  *         def __getbuffer__(ndarray self, Py_buffer* info, int flags):             # <<<<<<<<<<<<<<
@@ -7397,13 +7016,11 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __Pyx_RefNannyDeclarations
   int __pyx_t_1;
   int __pyx_t_2;
-  int __pyx_t_3;
-  PyObject *__pyx_t_4 = NULL;
+  PyObject *__pyx_t_3 = NULL;
+  int __pyx_t_4;
   int __pyx_t_5;
-  int __pyx_t_6;
-  int __pyx_t_7;
-  PyObject *__pyx_t_8 = NULL;
-  char *__pyx_t_9;
+  PyObject *__pyx_t_6 = NULL;
+  char *__pyx_t_7;
   int __pyx_lineno = 0;
   const char *__pyx_filename = NULL;
   int __pyx_clineno = 0;
@@ -7413,7 +7030,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __Pyx_GIVEREF(__pyx_v_info->obj);
   }
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":200
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203
  *             # of flags
  * 
  *             if info == NULL: return             # <<<<<<<<<<<<<<
@@ -7426,7 +7043,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     goto __pyx_L0;
   }
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206
  * 
  *             cdef int copy_shape, i, ndim
  *             cdef int endian_detector = 1             # <<<<<<<<<<<<<<
@@ -7435,7 +7052,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_endian_detector = 1;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":204
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":207
  *             cdef int copy_shape, i, ndim
  *             cdef int endian_detector = 1
  *             cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)             # <<<<<<<<<<<<<<
@@ -7444,7 +7061,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209
  *             cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
  * 
  *             ndim = PyArray_NDIM(self)             # <<<<<<<<<<<<<<
@@ -7453,7 +7070,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":208
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211
  *             ndim = PyArray_NDIM(self)
  * 
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
@@ -7463,7 +7080,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
   if (__pyx_t_1) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":212
  * 
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
  *                 copy_shape = 1             # <<<<<<<<<<<<<<
@@ -7475,7 +7092,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   }
   /*else*/ {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214
  *                 copy_shape = 1
  *             else:
  *                 copy_shape = 0             # <<<<<<<<<<<<<<
@@ -7486,83 +7103,87 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   }
   __pyx_L4:;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":213
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":216
  *                 copy_shape = 0
  * 
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)             # <<<<<<<<<<<<<<
  *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not C contiguous")
  */
-  __pyx_t_1 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
-  if (__pyx_t_1) {
+  __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
+  if (__pyx_t_2) {
+  } else {
+    __pyx_t_1 = __pyx_t_2;
+    goto __pyx_L6_bool_binop_done;
+  }
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217
  * 
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):             # <<<<<<<<<<<<<<
  *                 raise ValueError(u"ndarray is not C contiguous")
  * 
  */
-    __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0);
-    __pyx_t_3 = __pyx_t_2;
-  } else {
-    __pyx_t_3 = __pyx_t_1;
-  }
-  if (__pyx_t_3) {
+  __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0);
+  __pyx_t_1 = __pyx_t_2;
+  __pyx_L6_bool_binop_done:;
+  if (__pyx_t_1) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":215
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not C contiguous")             # <<<<<<<<<<<<<<
  * 
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  */
-    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_4);
-    __Pyx_Raise(__pyx_t_4, 0, 0, 0);
-    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_3);
+    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":220
  *                 raise ValueError(u"ndarray is not C contiguous")
  * 
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)             # <<<<<<<<<<<<<<
  *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not Fortran contiguous")
  */
-  __pyx_t_3 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
-  if (__pyx_t_3) {
+  __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
+  if (__pyx_t_2) {
+  } else {
+    __pyx_t_1 = __pyx_t_2;
+    goto __pyx_L9_bool_binop_done;
+  }
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221
  * 
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):             # <<<<<<<<<<<<<<
  *                 raise ValueError(u"ndarray is not Fortran contiguous")
  * 
  */
-    __pyx_t_1 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0);
-    __pyx_t_2 = __pyx_t_1;
-  } else {
-    __pyx_t_2 = __pyx_t_3;
-  }
-  if (__pyx_t_2) {
+  __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0);
+  __pyx_t_1 = __pyx_t_2;
+  __pyx_L9_bool_binop_done:;
+  if (__pyx_t_1) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":219
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not Fortran contiguous")             # <<<<<<<<<<<<<<
  * 
  *             info.buf = PyArray_DATA(self)
  */
-    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_4);
-    __Pyx_Raise(__pyx_t_4, 0, 0, 0);
-    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_3);
+    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":224
  *                 raise ValueError(u"ndarray is not Fortran contiguous")
  * 
  *             info.buf = PyArray_DATA(self)             # <<<<<<<<<<<<<<
@@ -7571,7 +7192,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":225
  * 
  *             info.buf = PyArray_DATA(self)
  *             info.ndim = ndim             # <<<<<<<<<<<<<<
@@ -7580,17 +7201,17 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->ndim = __pyx_v_ndim;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":223
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226
  *             info.buf = PyArray_DATA(self)
  *             info.ndim = ndim
  *             if copy_shape:             # <<<<<<<<<<<<<<
  *                 # Allocate new buffer for strides and shape info.
  *                 # This is allocated as one block, strides first.
  */
-  __pyx_t_2 = (__pyx_v_copy_shape != 0);
-  if (__pyx_t_2) {
+  __pyx_t_1 = (__pyx_v_copy_shape != 0);
+  if (__pyx_t_1) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229
  *                 # Allocate new buffer for strides and shape info.
  *                 # This is allocated as one block, strides first.
  *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)             # <<<<<<<<<<<<<<
@@ -7599,7 +7220,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2)));
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":227
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230
  *                 # This is allocated as one block, strides first.
  *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
  *                 info.shape = info.strides + ndim             # <<<<<<<<<<<<<<
@@ -7608,18 +7229,18 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":228
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":231
  *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
  *                 info.shape = info.strides + ndim
  *                 for i in range(ndim):             # <<<<<<<<<<<<<<
  *                     info.strides[i] = PyArray_STRIDES(self)[i]
  *                     info.shape[i] = PyArray_DIMS(self)[i]
  */
-    __pyx_t_5 = __pyx_v_ndim;
-    for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
-      __pyx_v_i = __pyx_t_6;
+    __pyx_t_4 = __pyx_v_ndim;
+    for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
+      __pyx_v_i = __pyx_t_5;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232
  *                 info.shape = info.strides + ndim
  *                 for i in range(ndim):
  *                     info.strides[i] = PyArray_STRIDES(self)[i]             # <<<<<<<<<<<<<<
@@ -7628,7 +7249,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
       (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233
  *                 for i in range(ndim):
  *                     info.strides[i] = PyArray_STRIDES(self)[i]
  *                     info.shape[i] = PyArray_DIMS(self)[i]             # <<<<<<<<<<<<<<
@@ -7637,11 +7258,11 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
       (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
     }
-    goto __pyx_L7;
+    goto __pyx_L11;
   }
   /*else*/ {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235
  *                     info.shape[i] = PyArray_DIMS(self)[i]
  *             else:
  *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)             # <<<<<<<<<<<<<<
@@ -7650,7 +7271,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236
  *             else:
  *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
  *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)             # <<<<<<<<<<<<<<
@@ -7659,9 +7280,9 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self));
   }
-  __pyx_L7:;
+  __pyx_L11:;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":234
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":237
  *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
  *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)
  *             info.suboffsets = NULL             # <<<<<<<<<<<<<<
@@ -7670,7 +7291,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->suboffsets = NULL;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":238
  *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)
  *             info.suboffsets = NULL
  *             info.itemsize = PyArray_ITEMSIZE(self)             # <<<<<<<<<<<<<<
@@ -7679,7 +7300,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239
  *             info.suboffsets = NULL
  *             info.itemsize = PyArray_ITEMSIZE(self)
  *             info.readonly = not PyArray_ISWRITEABLE(self)             # <<<<<<<<<<<<<<
@@ -7688,7 +7309,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":242
  * 
  *             cdef int t
  *             cdef char* f = NULL             # <<<<<<<<<<<<<<
@@ -7697,19 +7318,19 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_f = NULL;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":240
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":243
  *             cdef int t
  *             cdef char* f = NULL
  *             cdef dtype descr = self.descr             # <<<<<<<<<<<<<<
  *             cdef list stack
  *             cdef int offset
  */
-  __pyx_t_4 = ((PyObject *)__pyx_v_self->descr);
-  __Pyx_INCREF(__pyx_t_4);
-  __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4);
-  __pyx_t_4 = 0;
+  __pyx_t_3 = ((PyObject *)__pyx_v_self->descr);
+  __Pyx_INCREF(__pyx_t_3);
+  __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3);
+  __pyx_t_3 = 0;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":244
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":247
  *             cdef int offset
  * 
  *             cdef bint hasfields = PyDataType_HASFIELDS(descr)             # <<<<<<<<<<<<<<
@@ -7718,7 +7339,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":246
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":249
  *             cdef bint hasfields = PyDataType_HASFIELDS(descr)
  * 
  *             if not hasfields and not copy_shape:             # <<<<<<<<<<<<<<
@@ -7727,14 +7348,16 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0);
   if (__pyx_t_2) {
-    __pyx_t_3 = ((!(__pyx_v_copy_shape != 0)) != 0);
-    __pyx_t_1 = __pyx_t_3;
   } else {
     __pyx_t_1 = __pyx_t_2;
+    goto __pyx_L15_bool_binop_done;
   }
+  __pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0);
+  __pyx_t_1 = __pyx_t_2;
+  __pyx_L15_bool_binop_done:;
   if (__pyx_t_1) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":248
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":251
  *             if not hasfields and not copy_shape:
  *                 # do not call releasebuffer
  *                 info.obj = None             # <<<<<<<<<<<<<<
@@ -7746,11 +7369,11 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __Pyx_GOTREF(__pyx_v_info->obj);
     __Pyx_DECREF(__pyx_v_info->obj);
     __pyx_v_info->obj = Py_None;
-    goto __pyx_L10;
+    goto __pyx_L14;
   }
   /*else*/ {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":251
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":254
  *             else:
  *                 # need to call releasebuffer
  *                 info.obj = self             # <<<<<<<<<<<<<<
@@ -7763,9 +7386,9 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __Pyx_DECREF(__pyx_v_info->obj);
     __pyx_v_info->obj = ((PyObject *)__pyx_v_self);
   }
-  __pyx_L10:;
+  __pyx_L14:;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":253
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256
  *                 info.obj = self
  * 
  *             if not hasfields:             # <<<<<<<<<<<<<<
@@ -7775,66 +7398,69 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0);
   if (__pyx_t_1) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":254
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
  * 
  *             if not hasfields:
  *                 t = descr.type_num             # <<<<<<<<<<<<<<
  *                 if ((descr.byteorder == c'>' and little_endian) or
  *                     (descr.byteorder == c'<' and not little_endian)):
  */
-    __pyx_t_5 = __pyx_v_descr->type_num;
-    __pyx_v_t = __pyx_t_5;
+    __pyx_t_4 = __pyx_v_descr->type_num;
+    __pyx_v_t = __pyx_t_4;
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":255
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258
  *             if not hasfields:
  *                 t = descr.type_num
  *                 if ((descr.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
  *                     (descr.byteorder == c'<' and not little_endian)):
  *                     raise ValueError(u"Non-native byte order not supported")
  */
-    __pyx_t_1 = ((__pyx_v_descr->byteorder == '>') != 0);
-    if (__pyx_t_1) {
-      __pyx_t_2 = (__pyx_v_little_endian != 0);
+    __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0);
+    if (!__pyx_t_2) {
+      goto __pyx_L20_next_or;
     } else {
-      __pyx_t_2 = __pyx_t_1;
     }
+    __pyx_t_2 = (__pyx_v_little_endian != 0);
     if (!__pyx_t_2) {
+    } else {
+      __pyx_t_1 = __pyx_t_2;
+      goto __pyx_L19_bool_binop_done;
+    }
+    __pyx_L20_next_or:;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259
  *                 t = descr.type_num
  *                 if ((descr.byteorder == c'>' and little_endian) or
  *                     (descr.byteorder == c'<' and not little_endian)):             # <<<<<<<<<<<<<<
  *                     raise ValueError(u"Non-native byte order not supported")
  *                 if   t == NPY_BYTE:        f = "b"
  */
-      __pyx_t_1 = ((__pyx_v_descr->byteorder == '<') != 0);
-      if (__pyx_t_1) {
-        __pyx_t_3 = ((!(__pyx_v_little_endian != 0)) != 0);
-        __pyx_t_7 = __pyx_t_3;
-      } else {
-        __pyx_t_7 = __pyx_t_1;
-      }
-      __pyx_t_1 = __pyx_t_7;
+    __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0);
+    if (__pyx_t_2) {
     } else {
       __pyx_t_1 = __pyx_t_2;
+      goto __pyx_L19_bool_binop_done;
     }
+    __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0);
+    __pyx_t_1 = __pyx_t_2;
+    __pyx_L19_bool_binop_done:;
     if (__pyx_t_1) {
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260
  *                 if ((descr.byteorder == c'>' and little_endian) or
  *                     (descr.byteorder == c'<' and not little_endian)):
  *                     raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"
  */
-      __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_4);
-      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
-      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277
  *                 elif t == NPY_CDOUBLE:     f = "Zd"
  *                 elif t == NPY_CLONGDOUBLE: f = "Zg"
  *                 elif t == NPY_OBJECT:      f = "O"             # <<<<<<<<<<<<<<
@@ -7843,7 +7469,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     switch (__pyx_v_t) {
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261
  *                     (descr.byteorder == c'<' and not little_endian)):
  *                     raise ValueError(u"Non-native byte order not supported")
  *                 if   t == NPY_BYTE:        f = "b"             # <<<<<<<<<<<<<<
@@ -7854,7 +7480,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_b;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262
  *                     raise ValueError(u"Non-native byte order not supported")
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"             # <<<<<<<<<<<<<<
@@ -7865,7 +7491,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_B;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"
  *                 elif t == NPY_SHORT:       f = "h"             # <<<<<<<<<<<<<<
@@ -7876,7 +7502,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_h;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264
  *                 elif t == NPY_UBYTE:       f = "B"
  *                 elif t == NPY_SHORT:       f = "h"
  *                 elif t == NPY_USHORT:      f = "H"             # <<<<<<<<<<<<<<
@@ -7887,7 +7513,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_H;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265
  *                 elif t == NPY_SHORT:       f = "h"
  *                 elif t == NPY_USHORT:      f = "H"
  *                 elif t == NPY_INT:         f = "i"             # <<<<<<<<<<<<<<
@@ -7898,7 +7524,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_i;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266
  *                 elif t == NPY_USHORT:      f = "H"
  *                 elif t == NPY_INT:         f = "i"
  *                 elif t == NPY_UINT:        f = "I"             # <<<<<<<<<<<<<<
@@ -7909,7 +7535,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_I;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267
  *                 elif t == NPY_INT:         f = "i"
  *                 elif t == NPY_UINT:        f = "I"
  *                 elif t == NPY_LONG:        f = "l"             # <<<<<<<<<<<<<<
@@ -7920,7 +7546,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_l;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268
  *                 elif t == NPY_UINT:        f = "I"
  *                 elif t == NPY_LONG:        f = "l"
  *                 elif t == NPY_ULONG:       f = "L"             # <<<<<<<<<<<<<<
@@ -7931,7 +7557,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_L;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269
  *                 elif t == NPY_LONG:        f = "l"
  *                 elif t == NPY_ULONG:       f = "L"
  *                 elif t == NPY_LONGLONG:    f = "q"             # <<<<<<<<<<<<<<
@@ -7942,7 +7568,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_q;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270
  *                 elif t == NPY_ULONG:       f = "L"
  *                 elif t == NPY_LONGLONG:    f = "q"
  *                 elif t == NPY_ULONGLONG:   f = "Q"             # <<<<<<<<<<<<<<
@@ -7953,7 +7579,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Q;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271
  *                 elif t == NPY_LONGLONG:    f = "q"
  *                 elif t == NPY_ULONGLONG:   f = "Q"
  *                 elif t == NPY_FLOAT:       f = "f"             # <<<<<<<<<<<<<<
@@ -7964,7 +7590,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_f;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272
  *                 elif t == NPY_ULONGLONG:   f = "Q"
  *                 elif t == NPY_FLOAT:       f = "f"
  *                 elif t == NPY_DOUBLE:      f = "d"             # <<<<<<<<<<<<<<
@@ -7975,7 +7601,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_d;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273
  *                 elif t == NPY_FLOAT:       f = "f"
  *                 elif t == NPY_DOUBLE:      f = "d"
  *                 elif t == NPY_LONGDOUBLE:  f = "g"             # <<<<<<<<<<<<<<
@@ -7986,7 +7612,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_g;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
  *                 elif t == NPY_DOUBLE:      f = "d"
  *                 elif t == NPY_LONGDOUBLE:  f = "g"
  *                 elif t == NPY_CFLOAT:      f = "Zf"             # <<<<<<<<<<<<<<
@@ -7997,7 +7623,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Zf;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":275
  *                 elif t == NPY_LONGDOUBLE:  f = "g"
  *                 elif t == NPY_CFLOAT:      f = "Zf"
  *                 elif t == NPY_CDOUBLE:     f = "Zd"             # <<<<<<<<<<<<<<
@@ -8008,7 +7634,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Zd;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276
  *                 elif t == NPY_CFLOAT:      f = "Zf"
  *                 elif t == NPY_CDOUBLE:     f = "Zd"
  *                 elif t == NPY_CLONGDOUBLE: f = "Zg"             # <<<<<<<<<<<<<<
@@ -8019,7 +7645,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Zg;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277
  *                 elif t == NPY_CDOUBLE:     f = "Zd"
  *                 elif t == NPY_CLONGDOUBLE: f = "Zg"
  *                 elif t == NPY_OBJECT:      f = "O"             # <<<<<<<<<<<<<<
@@ -8031,33 +7657,33 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       break;
       default:
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":279
  *                 elif t == NPY_OBJECT:      f = "O"
  *                 else:
  *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)             # <<<<<<<<<<<<<<
  *                 info.format = f
  *                 return
  */
-      __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_8 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_8);
-      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_4);
-      PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_8);
-      __Pyx_GIVEREF(__pyx_t_8);
-      __pyx_t_8 = 0;
-      __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_8);
-      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __Pyx_Raise(__pyx_t_8, 0, 0, 0);
-      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_6);
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __Pyx_GIVEREF(__pyx_t_6);
+      PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
+      __pyx_t_6 = 0;
+      __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_6);
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __Pyx_Raise(__pyx_t_6, 0, 0, 0);
+      __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       break;
     }
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280
  *                 else:
  *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  *                 info.format = f             # <<<<<<<<<<<<<<
@@ -8066,7 +7692,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->format = __pyx_v_f;
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":278
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":281
  *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  *                 info.format = f
  *                 return             # <<<<<<<<<<<<<<
@@ -8078,7 +7704,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   }
   /*else*/ {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283
  *                 return
  *             else:
  *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)             # <<<<<<<<<<<<<<
@@ -8087,7 +7713,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->format = ((char *)malloc(255));
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":281
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":284
  *             else:
  *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)
  *                 info.format[0] = c'^' # Native data types, manual alignment             # <<<<<<<<<<<<<<
@@ -8096,7 +7722,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     (__pyx_v_info->format[0]) = '^';
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":282
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":285
  *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)
  *                 info.format[0] = c'^' # Native data types, manual alignment
  *                 offset = 0             # <<<<<<<<<<<<<<
@@ -8105,17 +7731,17 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_offset = 0;
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":286
  *                 info.format[0] = c'^' # Native data types, manual alignment
  *                 offset = 0
  *                 f = _util_dtypestring(descr, info.format + 1,             # <<<<<<<<<<<<<<
  *                                       info.format + _buffer_format_string_len,
  *                                       &offset)
  */
-    __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __pyx_v_f = __pyx_t_9;
+    __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_v_f = __pyx_t_7;
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":286
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":289
  *                                       info.format + _buffer_format_string_len,
  *                                       &offset)
  *                 f[0] = c'\0' # Terminate format string             # <<<<<<<<<<<<<<
@@ -8125,7 +7751,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     (__pyx_v_f[0]) = '\x00';
   }
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197
  *         # experimental exception made for __getbuffer__ and __releasebuffer__
  *         # -- the details of this may change.
  *         def __getbuffer__(ndarray self, Py_buffer* info, int flags):             # <<<<<<<<<<<<<<
@@ -8137,8 +7763,8 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_r = 0;
   goto __pyx_L0;
   __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_XDECREF(__pyx_t_8);
+  __Pyx_XDECREF(__pyx_t_3);
+  __Pyx_XDECREF(__pyx_t_6);
   __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __pyx_r = -1;
   if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
@@ -8157,7 +7783,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   return __pyx_r;
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
  *                 f[0] = c'\0' # Terminate format string
  * 
  *         def __releasebuffer__(ndarray self, Py_buffer* info):             # <<<<<<<<<<<<<<
@@ -8181,7 +7807,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   int __pyx_t_1;
   __Pyx_RefNannySetupContext("__releasebuffer__", 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":289
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292
  * 
  *         def __releasebuffer__(ndarray self, Py_buffer* info):
  *             if PyArray_HASFIELDS(self):             # <<<<<<<<<<<<<<
@@ -8191,7 +7817,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
   if (__pyx_t_1) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":290
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":293
  *         def __releasebuffer__(ndarray self, Py_buffer* info):
  *             if PyArray_HASFIELDS(self):
  *                 stdlib.free(info.format)             # <<<<<<<<<<<<<<
@@ -8203,7 +7829,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   }
   __pyx_L3:;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":294
  *             if PyArray_HASFIELDS(self):
  *                 stdlib.free(info.format)
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
@@ -8213,7 +7839,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
   if (__pyx_t_1) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":295
  *                 stdlib.free(info.format)
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
  *                 stdlib.free(info.strides)             # <<<<<<<<<<<<<<
@@ -8225,7 +7851,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   }
   __pyx_L4:;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
  *                 f[0] = c'\0' # Terminate format string
  * 
  *         def __releasebuffer__(ndarray self, Py_buffer* info):             # <<<<<<<<<<<<<<
@@ -8237,7 +7863,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   __Pyx_RefNannyFinishContext();
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
  * ctypedef npy_cdouble     complex_t
  * 
  * cdef inline object PyArray_MultiIterNew1(a):             # <<<<<<<<<<<<<<
@@ -8254,7 +7880,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":769
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":772
  * 
  * cdef inline object PyArray_MultiIterNew1(a):
  *     return PyArray_MultiIterNew(1, <void*>a)             # <<<<<<<<<<<<<<
@@ -8262,13 +7888,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__
  * cdef inline object PyArray_MultiIterNew2(a, b):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
  * ctypedef npy_cdouble     complex_t
  * 
  * cdef inline object PyArray_MultiIterNew1(a):             # <<<<<<<<<<<<<<
@@ -8287,7 +7913,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__
   return __pyx_r;
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
  *     return PyArray_MultiIterNew(1, <void*>a)
  * 
  * cdef inline object PyArray_MultiIterNew2(a, b):             # <<<<<<<<<<<<<<
@@ -8304,7 +7930,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":772
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":775
  * 
  * cdef inline object PyArray_MultiIterNew2(a, b):
  *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)             # <<<<<<<<<<<<<<
@@ -8312,13 +7938,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__
  * cdef inline object PyArray_MultiIterNew3(a, b, c):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
  *     return PyArray_MultiIterNew(1, <void*>a)
  * 
  * cdef inline object PyArray_MultiIterNew2(a, b):             # <<<<<<<<<<<<<<
@@ -8337,7 +7963,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__
   return __pyx_r;
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
  *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
  * 
  * cdef inline object PyArray_MultiIterNew3(a, b, c):             # <<<<<<<<<<<<<<
@@ -8354,7 +7980,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":775
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":778
  * 
  * cdef inline object PyArray_MultiIterNew3(a, b, c):
  *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)             # <<<<<<<<<<<<<<
@@ -8362,13 +7988,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
  *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
  * 
  * cdef inline object PyArray_MultiIterNew3(a, b, c):             # <<<<<<<<<<<<<<
@@ -8387,7 +8013,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__
   return __pyx_r;
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
  *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
  * 
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):             # <<<<<<<<<<<<<<
@@ -8404,7 +8030,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":778
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":781
  * 
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
  *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)             # <<<<<<<<<<<<<<
@@ -8412,13 +8038,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
  *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
  * 
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):             # <<<<<<<<<<<<<<
@@ -8437,7 +8063,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__
   return __pyx_r;
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
  *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
  * 
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):             # <<<<<<<<<<<<<<
@@ -8454,7 +8080,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":781
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":784
  * 
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
  *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)             # <<<<<<<<<<<<<<
@@ -8462,13 +8088,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__
  * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 784; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
  *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
  * 
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):             # <<<<<<<<<<<<<<
@@ -8487,7 +8113,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__
   return __pyx_r;
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":786
  *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
  * 
  * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:             # <<<<<<<<<<<<<<
@@ -8512,16 +8138,14 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
   int __pyx_t_5;
   int __pyx_t_6;
   int __pyx_t_7;
-  int __pyx_t_8;
-  int __pyx_t_9;
-  long __pyx_t_10;
-  char *__pyx_t_11;
+  long __pyx_t_8;
+  char *__pyx_t_9;
   int __pyx_lineno = 0;
   const char *__pyx_filename = NULL;
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("_util_dtypestring", 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":790
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":793
  *     cdef int delta_offset
  *     cdef tuple i
  *     cdef int endian_detector = 1             # <<<<<<<<<<<<<<
@@ -8530,7 +8154,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
   __pyx_v_endian_detector = 1;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":791
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794
  *     cdef tuple i
  *     cdef int endian_detector = 1
  *     cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)             # <<<<<<<<<<<<<<
@@ -8539,7 +8163,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
   __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":797
  *     cdef tuple fields
  * 
  *     for childname in descr.names:             # <<<<<<<<<<<<<<
@@ -8548,33 +8172,38 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
   if (unlikely(__pyx_v_descr->names == Py_None)) {
     PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
-    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
   for (;;) {
     if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
     #if CYTHON_COMPILING_IN_CPYTHON
-    __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     #else
-    __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_3);
     #endif
     __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
     __pyx_t_3 = 0;
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":795
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798
  * 
  *     for childname in descr.names:
  *         fields = descr.fields[childname]             # <<<<<<<<<<<<<<
  *         child, new_offset = fields
  * 
  */
-    __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    if (unlikely(__pyx_v_descr->fields == Py_None)) {
+      PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
+    __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
     __Pyx_GOTREF(__pyx_t_3);
-    if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
     __pyx_t_3 = 0;
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":796
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799
  *     for childname in descr.names:
  *         fields = descr.fields[childname]
  *         child, new_offset = fields             # <<<<<<<<<<<<<<
@@ -8591,7 +8220,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
       if (unlikely(size != 2)) {
         if (size > 2) __Pyx_RaiseTooManyValuesError(2);
         else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
-        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       }
       #if CYTHON_COMPILING_IN_CPYTHON
       __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); 
@@ -8599,101 +8228,104 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
       __Pyx_INCREF(__pyx_t_3);
       __Pyx_INCREF(__pyx_t_4);
       #else
-      __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
       #endif
     } else {
-      __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
-    if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
     __pyx_t_3 = 0;
     __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
     __pyx_t_4 = 0;
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801
  *         child, new_offset = fields
  * 
  *         if (end - f) - <int>(new_offset - offset[0]) < 15:             # <<<<<<<<<<<<<<
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
  * 
  */
-    __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
     __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-    __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
     __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
     if (__pyx_t_6) {
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
  * 
  *         if (end - f) - <int>(new_offset - offset[0]) < 15:
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")             # <<<<<<<<<<<<<<
  * 
  *         if ((child.byteorder == c'>' and little_endian) or
  */
-      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
       __Pyx_Raise(__pyx_t_3, 0, 0, 0);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":804
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
  * 
  *         if ((child.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
  *             (child.byteorder == c'<' and not little_endian)):
  *             raise ValueError(u"Non-native byte order not supported")
  */
-    __pyx_t_6 = ((__pyx_v_child->byteorder == '>') != 0);
-    if (__pyx_t_6) {
-      __pyx_t_7 = (__pyx_v_little_endian != 0);
+    __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0);
+    if (!__pyx_t_7) {
+      goto __pyx_L8_next_or;
     } else {
-      __pyx_t_7 = __pyx_t_6;
     }
+    __pyx_t_7 = (__pyx_v_little_endian != 0);
     if (!__pyx_t_7) {
+    } else {
+      __pyx_t_6 = __pyx_t_7;
+      goto __pyx_L7_bool_binop_done;
+    }
+    __pyx_L8_next_or:;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":805
  * 
  *         if ((child.byteorder == c'>' and little_endian) or
  *             (child.byteorder == c'<' and not little_endian)):             # <<<<<<<<<<<<<<
  *             raise ValueError(u"Non-native byte order not supported")
  *             # One could encode it in the format string and have Cython
  */
-      __pyx_t_6 = ((__pyx_v_child->byteorder == '<') != 0);
-      if (__pyx_t_6) {
-        __pyx_t_8 = ((!(__pyx_v_little_endian != 0)) != 0);
-        __pyx_t_9 = __pyx_t_8;
-      } else {
-        __pyx_t_9 = __pyx_t_6;
-      }
-      __pyx_t_6 = __pyx_t_9;
+    __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0);
+    if (__pyx_t_7) {
     } else {
       __pyx_t_6 = __pyx_t_7;
+      goto __pyx_L7_bool_binop_done;
     }
+    __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0);
+    __pyx_t_6 = __pyx_t_7;
+    __pyx_L7_bool_binop_done:;
     if (__pyx_t_6) {
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":806
  *         if ((child.byteorder == c'>' and little_endian) or
  *             (child.byteorder == c'<' and not little_endian)):
  *             raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *             # One could encode it in the format string and have Cython
  *             # complain instead, BUT: < and > in format strings also imply
  */
-      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
       __Pyx_Raise(__pyx_t_3, 0, 0, 0);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":813
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816
  * 
  *         # Output padding bytes
  *         while offset[0] < new_offset:             # <<<<<<<<<<<<<<
@@ -8701,15 +8333,15 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  *             f += 1
  */
     while (1) {
-      __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (!__pyx_t_6) break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":814
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":817
  *         # Output padding bytes
  *         while offset[0] < new_offset:
  *             f[0] = 120 # "x"; pad byte             # <<<<<<<<<<<<<<
@@ -8718,7 +8350,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
       (__pyx_v_f[0]) = 120;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":815
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818
  *         while offset[0] < new_offset:
  *             f[0] = 120 # "x"; pad byte
  *             f += 1             # <<<<<<<<<<<<<<
@@ -8727,28 +8359,28 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
       __pyx_v_f = (__pyx_v_f + 1);
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":819
  *             f[0] = 120 # "x"; pad byte
  *             f += 1
  *             offset[0] += 1             # <<<<<<<<<<<<<<
  * 
  *         offset[0] += child.itemsize
  */
-      __pyx_t_10 = 0;
-      (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1);
+      __pyx_t_8 = 0;
+      (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1);
     }
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821
  *             offset[0] += 1
  * 
  *         offset[0] += child.itemsize             # <<<<<<<<<<<<<<
  * 
  *         if not PyDataType_HASFIELDS(child):
  */
-    __pyx_t_10 = 0;
-    (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize);
+    __pyx_t_8 = 0;
+    (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize);
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":820
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823
  *         offset[0] += child.itemsize
  * 
  *         if not PyDataType_HASFIELDS(child):             # <<<<<<<<<<<<<<
@@ -8758,19 +8390,19 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
     __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
     if (__pyx_t_6) {
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":824
  * 
  *         if not PyDataType_HASFIELDS(child):
  *             t = child.type_num             # <<<<<<<<<<<<<<
  *             if end - f < 5:
  *                 raise RuntimeError(u"Format string allocated too short.")
  */
-      __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
       __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
       __pyx_t_4 = 0;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":822
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":825
  *         if not PyDataType_HASFIELDS(child):
  *             t = child.type_num
  *             if end - f < 5:             # <<<<<<<<<<<<<<
@@ -8780,357 +8412,357 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
       __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
       if (__pyx_t_6) {
 
-        /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823
+        /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826
  *             t = child.type_num
  *             if end - f < 5:
  *                 raise RuntimeError(u"Format string allocated too short.")             # <<<<<<<<<<<<<<
  * 
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  */
-        __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_4);
         __Pyx_Raise(__pyx_t_4, 0, 0, 0);
         __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829
  * 
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  *             if   t == NPY_BYTE:        f[0] =  98 #"b"             # <<<<<<<<<<<<<<
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 98;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":827
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  *             if   t == NPY_BYTE:        f[0] =  98 #"b"
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"             # <<<<<<<<<<<<<<
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 66;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":828
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831
  *             if   t == NPY_BYTE:        f[0] =  98 #"b"
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"             # <<<<<<<<<<<<<<
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 104;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"             # <<<<<<<<<<<<<<
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 72;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  *             elif t == NPY_INT:         f[0] = 105 #"i"             # <<<<<<<<<<<<<<
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 105;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  *             elif t == NPY_UINT:        f[0] =  73 #"I"             # <<<<<<<<<<<<<<
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 73;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  *             elif t == NPY_LONG:        f[0] = 108 #"l"             # <<<<<<<<<<<<<<
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 108;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"             # <<<<<<<<<<<<<<
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 76;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"             # <<<<<<<<<<<<<<
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 113;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"             # <<<<<<<<<<<<<<
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 81;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"             # <<<<<<<<<<<<<<
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 102;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"             # <<<<<<<<<<<<<<
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 100;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"             # <<<<<<<<<<<<<<
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 103;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf             # <<<<<<<<<<<<<<
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 90;
         (__pyx_v_f[1]) = 102;
         __pyx_v_f = (__pyx_v_f + 1);
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":843
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd             # <<<<<<<<<<<<<<
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 90;
         (__pyx_v_f[1]) = 100;
         __pyx_v_f = (__pyx_v_f + 1);
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg             # <<<<<<<<<<<<<<
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
  *             else:
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 90;
         (__pyx_v_f[1]) = 103;
         __pyx_v_f = (__pyx_v_f + 1);
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"             # <<<<<<<<<<<<<<
  *             else:
  *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 79;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
       /*else*/ {
 
-        /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844
+        /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":847
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
  *             else:
  *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)             # <<<<<<<<<<<<<<
  *             f += 1
  *         else:
  */
-        __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_3);
-        __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_4);
-        PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
         __Pyx_GIVEREF(__pyx_t_3);
+        PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
         __pyx_t_3 = 0;
-        __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_3);
         __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
         __Pyx_Raise(__pyx_t_3, 0, 0, 0);
         __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       }
-      __pyx_L11:;
+      __pyx_L15:;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":848
  *             else:
  *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  *             f += 1             # <<<<<<<<<<<<<<
@@ -9138,25 +8770,33 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  *             # Cython ignores struct boundary information ("T{...}"),
  */
       __pyx_v_f = (__pyx_v_f + 1);
-      goto __pyx_L9;
+      goto __pyx_L13;
     }
     /*else*/ {
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":849
+      /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":852
  *             # Cython ignores struct boundary information ("T{...}"),
  *             # so don't output it
  *             f = _util_dtypestring(child, f, end, offset)             # <<<<<<<<<<<<<<
  *     return f
  * 
  */
-      __pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __pyx_v_f = __pyx_t_11;
+      __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 852; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_v_f = __pyx_t_9;
     }
-    __pyx_L9:;
+    __pyx_L13:;
+
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":797
+ *     cdef tuple fields
+ * 
+ *     for childname in descr.names:             # <<<<<<<<<<<<<<
+ *         fields = descr.fields[childname]
+ *         child, new_offset = fields
+ */
   }
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":850
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":853
  *             # so don't output it
  *             f = _util_dtypestring(child, f, end, offset)
  *     return f             # <<<<<<<<<<<<<<
@@ -9166,7 +8806,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
   __pyx_r = __pyx_v_f;
   goto __pyx_L0;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":786
  *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
  * 
  * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:             # <<<<<<<<<<<<<<
@@ -9191,7 +8831,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
   return __pyx_r;
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969
  * 
  * 
  * cdef inline void set_array_base(ndarray arr, object base):             # <<<<<<<<<<<<<<
@@ -9206,7 +8846,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   int __pyx_t_2;
   __Pyx_RefNannySetupContext("set_array_base", 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":968
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971
  * cdef inline void set_array_base(ndarray arr, object base):
  *      cdef PyObject* baseptr
  *      if base is None:             # <<<<<<<<<<<<<<
@@ -9217,7 +8857,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   __pyx_t_2 = (__pyx_t_1 != 0);
   if (__pyx_t_2) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972
  *      cdef PyObject* baseptr
  *      if base is None:
  *          baseptr = NULL             # <<<<<<<<<<<<<<
@@ -9229,7 +8869,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   }
   /*else*/ {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974
  *          baseptr = NULL
  *      else:
  *          Py_INCREF(base) # important to do this before decref below!             # <<<<<<<<<<<<<<
@@ -9238,7 +8878,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
  */
     Py_INCREF(__pyx_v_base);
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":975
  *      else:
  *          Py_INCREF(base) # important to do this before decref below!
  *          baseptr = <PyObject*>base             # <<<<<<<<<<<<<<
@@ -9249,7 +8889,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   }
   __pyx_L3:;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":973
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
  *          Py_INCREF(base) # important to do this before decref below!
  *          baseptr = <PyObject*>base
  *      Py_XDECREF(arr.base)             # <<<<<<<<<<<<<<
@@ -9258,7 +8898,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
  */
   Py_XDECREF(__pyx_v_arr->base);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977
  *          baseptr = <PyObject*>base
  *      Py_XDECREF(arr.base)
  *      arr.base = baseptr             # <<<<<<<<<<<<<<
@@ -9267,7 +8907,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
  */
   __pyx_v_arr->base = __pyx_v_baseptr;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969
  * 
  * 
  * cdef inline void set_array_base(ndarray arr, object base):             # <<<<<<<<<<<<<<
@@ -9279,7 +8919,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   __Pyx_RefNannyFinishContext();
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
+/* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":979
  *      arr.base = baseptr
  * 
  * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
@@ -9293,7 +8933,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__py
   int __pyx_t_1;
   __Pyx_RefNannySetupContext("get_array_base", 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980
  * 
  * cdef inline object get_array_base(ndarray arr):
  *     if arr.base is NULL:             # <<<<<<<<<<<<<<
@@ -9303,7 +8943,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__py
   __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0);
   if (__pyx_t_1) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":978
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":981
  * cdef inline object get_array_base(ndarray arr):
  *     if arr.base is NULL:
  *         return None             # <<<<<<<<<<<<<<
@@ -9317,7 +8957,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__py
   }
   /*else*/ {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980
+    /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":983
  *         return None
  *     else:
  *         return <object>arr.base             # <<<<<<<<<<<<<<
@@ -9328,7 +8968,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__py
     goto __pyx_L0;
   }
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":979
  *      arr.base = baseptr
  * 
  * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
@@ -9393,22 +9033,22 @@ static PyObject *__pyx_sq_item_5skbio_9alignment_12_ssw_wrapper_AlignmentStructu
 }
 
 static PyMethodDef __pyx_methods_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure[] = {
-  {__Pyx_NAMESTR("optimal_alignment_score"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_11optimal_alignment_score, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_10optimal_alignment_score)},
-  {__Pyx_NAMESTR("suboptimal_alignment_score"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_13suboptimal_alignment_score, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_12suboptimal_alignment_score)},
-  {__Pyx_NAMESTR("target_begin"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_15target_begin, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_14target_begin)},
-  {__Pyx_NAMESTR("target_end_optimal"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_17target_end_optimal, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_16target_end_optimal)},
-  {__Pyx_NAMESTR("target_end_suboptimal"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_19target_end_suboptimal, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_18target_end_suboptimal)},
-  {__Pyx_NAMESTR("query_begin"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_21query_begin, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_20query_begin)},
-  {__Pyx_NAMESTR("query_end"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_23query_end, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_22query_end)},
-  {__Pyx_NAMESTR("cigar"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_25cigar, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_24cigar)},
-  {__Pyx_NAMESTR("query_sequence"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_27query_sequence, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_26query_sequence)},
-  {__Pyx_NAMESTR("target_sequence"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_29target_sequence, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_28target_sequence)},
-  {__Pyx_NAMESTR("aligned_query_sequence"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_31aligned_query_sequence, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_30aligned_query_sequence)},
-  {__Pyx_NAMESTR("aligned_target_sequence"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_33aligned_target_sequence, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_32aligned_target_sequence)},
-  {__Pyx_NAMESTR("set_zero_based"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_35set_zero_based, METH_O, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_34set_zero_based)},
-  {__Pyx_NAMESTR("is_zero_based"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_37is_zero_based, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_36is_zero_based)},
-  {__Pyx_NAMESTR("_get_aligned_sequence"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_39_get_aligned_sequence, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)},
-  {__Pyx_NAMESTR("_tuples_from_cigar"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_41_tuples_from_cigar, METH_NOARGS, __Pyx_DOCSTR(0)},
+  {"optimal_alignment_score", (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_11optimal_alignment_score, METH_NOARGS, __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_10optimal_alignment_score},
+  {"suboptimal_alignment_score", (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_13suboptimal_alignment_score, METH_NOARGS, __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_12suboptimal_alignment_score},
+  {"target_begin", (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_15target_begin, METH_NOARGS, __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_14target_begin},
+  {"target_end_optimal", (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_17target_end_optimal, METH_NOARGS, __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_16target_end_optimal},
+  {"target_end_suboptimal", (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_19target_end_suboptimal, METH_NOARGS, __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_18target_end_suboptimal},
+  {"query_begin", (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_21query_begin, METH_NOARGS, __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_20query_begin},
+  {"query_end", (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_23query_end, METH_NOARGS, __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_22query_end},
+  {"cigar", (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_25cigar, METH_NOARGS, __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_24cigar},
+  {"query_sequence", (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_27query_sequence, METH_NOARGS, __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_26query_sequence},
+  {"target_sequence", (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_29target_sequence, METH_NOARGS, __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_28target_sequence},
+  {"aligned_query_sequence", (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_31aligned_query_sequence, METH_NOARGS, __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_30aligned_query_sequence},
+  {"aligned_target_sequence", (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_33aligned_target_sequence, METH_NOARGS, __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_32aligned_target_sequence},
+  {"set_zero_based", (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_35set_zero_based, METH_O, __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_34set_zero_based},
+  {"is_zero_based", (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_37is_zero_based, METH_NOARGS, __pyx_doc_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_36is_zero_based},
+  {"_get_aligned_sequence", (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_39_get_aligned_sequence, METH_VARARGS|METH_KEYWORDS, 0},
+  {"_tuples_from_cigar", (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_18AlignmentStructure_41_tuples_from_cigar, METH_NOARGS, 0},
   {0, 0, 0, 0}
 };
 
@@ -9433,7 +9073,7 @@ static PyMappingMethods __pyx_tp_as_mapping_AlignmentStructure = {
 
 static PyTypeObject __pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure = {
   PyVarObject_HEAD_INIT(0, 0)
-  __Pyx_NAMESTR("skbio.alignment._ssw_wrapper.AlignmentStructure"), /*tp_name*/
+  "skbio.alignment._ssw_wrapper.AlignmentStructure", /*tp_name*/
   sizeof(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure), /*tp_basicsize*/
   0, /*tp_itemsize*/
   __pyx_tp_dealloc_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, /*tp_dealloc*/
@@ -9456,7 +9096,7 @@ static PyTypeObject __pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructu
   0, /*tp_setattro*/
   0, /*tp_as_buffer*/
   Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
-  __Pyx_DOCSTR("Wraps the result of an alignment c struct so it is accessible to Python\n\n    Attributes\n    ----------\n    optimal_alignment_score\n    suboptimal_alignment_score\n    target_begin\n    target_end_optimal\n    target_end_suboptimal\n    query_begin\n    query_end\n    cigar\n    query_sequence\n    target_sequence\n    aligned_query_sequence\n    aligned_target_sequence\n\n    Notes\n    -----\n    `cigar` may be empty depending on parameters used.\n\n    `target_begi [...]
+  "Wraps the result of an alignment c struct so it is accessible to Python\n\n    Notes\n    -----\n    `cigar` may be empty depending on parameters used.\n\n    `target_begin` and `query_begin` may be -1 depending on parameters used.\n\n    Developer note: `read_sequence` is an alias for `query_sequence` used by\n    ssw.c as is `reference_sequence` for `target_sequence`\n    ", /*tp_doc*/
   0, /*tp_traverse*/
   0, /*tp_clear*/
   0, /*tp_richcompare*/
@@ -9482,9 +9122,7 @@ static PyTypeObject __pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructu
   0, /*tp_subclasses*/
   0, /*tp_weaklist*/
   0, /*tp_del*/
-  #if PY_VERSION_HEX >= 0x02060000
   0, /*tp_version_tag*/
-  #endif
   #if PY_VERSION_HEX >= 0x030400a1
   0, /*tp_finalize*/
   #endif
@@ -9574,13 +9212,13 @@ static int __pyx_tp_clear_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman(
 }
 
 static PyMethodDef __pyx_methods_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman[] = {
-  {__Pyx_NAMESTR("_get_bit_flag"), (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_7_get_bit_flag, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)},
+  {"_get_bit_flag", (PyCFunction)__pyx_pw_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_7_get_bit_flag, METH_VARARGS|METH_KEYWORDS, 0},
   {0, 0, 0, 0}
 };
 
 static PyTypeObject __pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman = {
   PyVarObject_HEAD_INIT(0, 0)
-  __Pyx_NAMESTR("skbio.alignment._ssw_wrapper.StripedSmithWaterman"), /*tp_name*/
+  "skbio.alignment._ssw_wrapper.StripedSmithWaterman", /*tp_name*/
   sizeof(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman), /*tp_basicsize*/
   0, /*tp_itemsize*/
   __pyx_tp_dealloc_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman, /*tp_dealloc*/
@@ -9603,7 +9241,7 @@ static PyTypeObject __pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWate
   0, /*tp_setattro*/
   0, /*tp_as_buffer*/
   Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
-  __Pyx_DOCSTR("Performs a striped (banded) Smith Waterman Alignment.\n\n    First a StripedSmithWaterman object must be instantiated with a query\n    sequence. The resulting object is then callable with a target sequence and\n    may be reused on a large collection of target sequences.\n\n    Parameters\n    ----------\n    query_sequence : string\n        The query sequence, this may be upper or lowercase from the set of\n        {A, C, G, T, N} (nucleotide) or from the set of\n       [...]
+  "Performs a striped (banded) Smith Waterman Alignment.\n\n    First a StripedSmithWaterman object must be instantiated with a query\n    sequence. The resulting object is then callable with a target sequence and\n    may be reused on a large collection of target sequences.\n\n    Parameters\n    ----------\n    query_sequence : string\n        The query sequence, this may be upper or lowercase from the set of\n        {A, C, G, T, N} (nucleotide) or from the set of\n        {A, R, N, D [...]
   __pyx_tp_traverse_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman, /*tp_traverse*/
   __pyx_tp_clear_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman, /*tp_clear*/
   0, /*tp_richcompare*/
@@ -9629,9 +9267,7 @@ static PyTypeObject __pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWate
   0, /*tp_subclasses*/
   0, /*tp_weaklist*/
   0, /*tp_del*/
-  #if PY_VERSION_HEX >= 0x02060000
   0, /*tp_version_tag*/
-  #endif
   #if PY_VERSION_HEX >= 0x030400a1
   0, /*tp_finalize*/
   #endif
@@ -9648,7 +9284,7 @@ static struct PyModuleDef __pyx_moduledef = {
   #else
     PyModuleDef_HEAD_INIT,
   #endif
-    __Pyx_NAMESTR("_ssw_wrapper"),
+    "_ssw_wrapper",
     0, /* m_doc */
     -1, /* m_size */
     __pyx_methods /* m_methods */,
@@ -9674,11 +9310,10 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
   {&__pyx_kp_s_Must_provide_a_substitution_matr, __pyx_k_Must_provide_a_substitution_matr, sizeof(__pyx_k_Must_provide_a_substitution_matr), 0, 0, 1, 0},
   {&__pyx_n_s_N, __pyx_k_N, sizeof(__pyx_k_N), 0, 0, 1, 1},
   {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0},
-  {&__pyx_n_s_NucleotideSequence, __pyx_k_NucleotideSequence, sizeof(__pyx_k_NucleotideSequence), 0, 0, 1, 1},
-  {&__pyx_n_s_ProteinSequence, __pyx_k_ProteinSequence, sizeof(__pyx_k_ProteinSequence), 0, 0, 1, 1},
+  {&__pyx_n_s_Protein, __pyx_k_Protein, sizeof(__pyx_k_Protein), 0, 0, 1, 1},
   {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1},
   {&__pyx_kp_s_Score_d, __pyx_k_Score_d, sizeof(__pyx_k_Score_d), 0, 0, 1, 0},
-  {&__pyx_kp_s_Users_jairideout_dev_scikit_bio, __pyx_k_Users_jairideout_dev_scikit_bio, sizeof(__pyx_k_Users_jairideout_dev_scikit_bio), 0, 0, 1, 0},
+  {&__pyx_n_s_Sequence, __pyx_k_Sequence, sizeof(__pyx_k_Sequence), 0, 0, 1, 1},
   {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
   {&__pyx_kp_s__3, __pyx_k__3, sizeof(__pyx_k__3), 0, 0, 1, 0},
   {&__pyx_kp_s__5, __pyx_k__5, sizeof(__pyx_k__5), 0, 0, 1, 0},
@@ -9686,7 +9321,6 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
   {&__pyx_kp_s__8, __pyx_k__8, sizeof(__pyx_k__8), 0, 0, 1, 0},
   {&__pyx_n_s_aligned_query_sequence, __pyx_k_aligned_query_sequence, sizeof(__pyx_k_aligned_query_sequence), 0, 0, 1, 1},
   {&__pyx_n_s_aligned_target_sequence, __pyx_k_aligned_target_sequence, sizeof(__pyx_k_aligned_target_sequence), 0, 0, 1, 1},
-  {&__pyx_n_s_alignment, __pyx_k_alignment, sizeof(__pyx_k_alignment), 0, 0, 1, 1},
   {&__pyx_n_s_array, __pyx_k_array, sizeof(__pyx_k_array), 0, 0, 1, 1},
   {&__pyx_n_s_begin, __pyx_k_begin, sizeof(__pyx_k_begin), 0, 0, 1, 1},
   {&__pyx_n_s_cigar, __pyx_k_cigar, sizeof(__pyx_k_cigar), 0, 0, 1, 1},
@@ -9701,18 +9335,14 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
   {&__pyx_n_s_gap_open_penalty, __pyx_k_gap_open_penalty, sizeof(__pyx_k_gap_open_penalty), 0, 0, 1, 1},
   {&__pyx_kp_s_gap_open_penalty_must_be_0, __pyx_k_gap_open_penalty_must_be_0, sizeof(__pyx_k_gap_open_penalty_must_be_0), 0, 0, 1, 0},
   {&__pyx_n_s_gap_type, __pyx_k_gap_type, sizeof(__pyx_k_gap_type), 0, 0, 1, 1},
-  {&__pyx_n_s_get, __pyx_k_get, sizeof(__pyx_k_get), 0, 0, 1, 1},
   {&__pyx_n_s_get_aligned_sequence, __pyx_k_get_aligned_sequence, sizeof(__pyx_k_get_aligned_sequence), 0, 0, 1, 1},
   {&__pyx_n_s_get_bit_flag, __pyx_k_get_bit_flag, sizeof(__pyx_k_get_bit_flag), 0, 0, 1, 1},
-  {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
   {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
   {&__pyx_n_s_index_starts_at, __pyx_k_index_starts_at, sizeof(__pyx_k_index_starts_at), 0, 0, 1, 1},
   {&__pyx_n_s_int8, __pyx_k_int8, sizeof(__pyx_k_int8), 0, 0, 1, 1},
   {&__pyx_n_s_is_zero_based, __pyx_k_is_zero_based, sizeof(__pyx_k_is_zero_based), 0, 0, 1, 1},
   {&__pyx_n_s_isdigit, __pyx_k_isdigit, sizeof(__pyx_k_isdigit), 0, 0, 1, 1},
   {&__pyx_n_s_join, __pyx_k_join, sizeof(__pyx_k_join), 0, 0, 1, 1},
-  {&__pyx_n_s_kwargs, __pyx_k_kwargs, sizeof(__pyx_k_kwargs), 0, 0, 1, 1},
-  {&__pyx_n_s_local_pairwise_align_ssw, __pyx_k_local_pairwise_align_ssw, sizeof(__pyx_k_local_pairwise_align_ssw), 0, 0, 1, 1},
   {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
   {&__pyx_n_s_mask_auto, __pyx_k_mask_auto, sizeof(__pyx_k_mask_auto), 0, 0, 1, 1},
   {&__pyx_n_s_mask_length, __pyx_k_mask_length, sizeof(__pyx_k_mask_length), 0, 0, 1, 1},
@@ -9730,10 +9360,7 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
   {&__pyx_n_s_override_skip_babp, __pyx_k_override_skip_babp, sizeof(__pyx_k_override_skip_babp), 0, 0, 1, 1},
   {&__pyx_n_s_property, __pyx_k_property, sizeof(__pyx_k_property), 0, 0, 1, 1},
   {&__pyx_n_s_protein, __pyx_k_protein, sizeof(__pyx_k_protein), 0, 0, 1, 1},
-  {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
-  {&__pyx_n_s_pyx_releasebuffer, __pyx_k_pyx_releasebuffer, sizeof(__pyx_k_pyx_releasebuffer), 0, 0, 1, 1},
   {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
-  {&__pyx_n_s_query, __pyx_k_query, sizeof(__pyx_k_query), 0, 0, 1, 1},
   {&__pyx_n_s_query_begin, __pyx_k_query_begin, sizeof(__pyx_k_query_begin), 0, 0, 1, 1},
   {&__pyx_n_s_query_end, __pyx_k_query_end, sizeof(__pyx_k_query_end), 0, 0, 1, 1},
   {&__pyx_n_s_query_sequence, __pyx_k_query_sequence, sizeof(__pyx_k_query_sequence), 0, 0, 1, 1},
@@ -9742,24 +9369,16 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
   {&__pyx_n_s_read_sequence, __pyx_k_read_sequence, sizeof(__pyx_k_read_sequence), 0, 0, 1, 1},
   {&__pyx_n_s_reference_sequence, __pyx_k_reference_sequence, sizeof(__pyx_k_reference_sequence), 0, 0, 1, 1},
   {&__pyx_kp_s_s, __pyx_k_s, sizeof(__pyx_k_s), 0, 0, 1, 0},
-  {&__pyx_n_s_score, __pyx_k_score, sizeof(__pyx_k_score), 0, 0, 1, 1},
   {&__pyx_n_s_score_filter, __pyx_k_score_filter, sizeof(__pyx_k_score_filter), 0, 0, 1, 1},
   {&__pyx_n_s_score_only, __pyx_k_score_only, sizeof(__pyx_k_score_only), 0, 0, 1, 1},
   {&__pyx_n_s_score_size, __pyx_k_score_size, sizeof(__pyx_k_score_size), 0, 0, 1, 1},
-  {&__pyx_n_s_seqs, __pyx_k_seqs, sizeof(__pyx_k_seqs), 0, 0, 1, 1},
   {&__pyx_n_s_sequence, __pyx_k_sequence, sizeof(__pyx_k_sequence), 0, 0, 1, 1},
-  {&__pyx_n_s_sequence1, __pyx_k_sequence1, sizeof(__pyx_k_sequence1), 0, 0, 1, 1},
-  {&__pyx_n_s_sequence2, __pyx_k_sequence2, sizeof(__pyx_k_sequence2), 0, 0, 1, 1},
   {&__pyx_n_s_set_zero_based, __pyx_k_set_zero_based, sizeof(__pyx_k_set_zero_based), 0, 0, 1, 1},
   {&__pyx_n_s_skbio_alignment, __pyx_k_skbio_alignment, sizeof(__pyx_k_skbio_alignment), 0, 0, 1, 1},
-  {&__pyx_n_s_skbio_alignment__ssw_wrapper, __pyx_k_skbio_alignment__ssw_wrapper, sizeof(__pyx_k_skbio_alignment__ssw_wrapper), 0, 0, 1, 1},
   {&__pyx_n_s_skbio_sequence, __pyx_k_skbio_sequence, sizeof(__pyx_k_skbio_sequence), 0, 0, 1, 1},
-  {&__pyx_n_s_start_end, __pyx_k_start_end, sizeof(__pyx_k_start_end), 0, 0, 1, 1},
-  {&__pyx_n_s_start_end_positions, __pyx_k_start_end_positions, sizeof(__pyx_k_start_end_positions), 0, 0, 1, 1},
   {&__pyx_n_s_suboptimal_alignment_score, __pyx_k_suboptimal_alignment_score, sizeof(__pyx_k_suboptimal_alignment_score), 0, 0, 1, 1},
   {&__pyx_n_s_substitution_matrix, __pyx_k_substitution_matrix, sizeof(__pyx_k_substitution_matrix), 0, 0, 1, 1},
   {&__pyx_n_s_suppress_sequences, __pyx_k_suppress_sequences, sizeof(__pyx_k_suppress_sequences), 0, 0, 1, 1},
-  {&__pyx_n_s_target, __pyx_k_target, sizeof(__pyx_k_target), 0, 0, 1, 1},
   {&__pyx_n_s_target_begin, __pyx_k_target_begin, sizeof(__pyx_k_target_begin), 0, 0, 1, 1},
   {&__pyx_n_s_target_end_optimal, __pyx_k_target_end_optimal, sizeof(__pyx_k_target_end_optimal), 0, 0, 1, 1},
   {&__pyx_n_s_target_end_suboptimal, __pyx_k_target_end_suboptimal, sizeof(__pyx_k_target_end_suboptimal), 0, 0, 1, 1},
@@ -9772,13 +9391,13 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
   {0, 0, 0, 0, 0, 0, 0}
 };
 static int __Pyx_InitCachedBuiltins(void) {
-  __pyx_builtin_property = __Pyx_GetBuiltinName(__pyx_n_s_property); if (!__pyx_builtin_property) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 281; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_builtin_Exception = __Pyx_GetBuiltinName(__pyx_n_s_Exception); if (!__pyx_builtin_Exception) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_builtin_ord = __Pyx_GetBuiltinName(__pyx_n_s_ord); if (!__pyx_builtin_ord) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 694; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_property = __Pyx_GetBuiltinName(__pyx_n_s_property); if (!__pyx_builtin_property) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 266; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_Exception = __Pyx_GetBuiltinName(__pyx_n_s_Exception); if (!__pyx_builtin_Exception) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_ord = __Pyx_GetBuiltinName(__pyx_n_s_ord); if (!__pyx_builtin_ord) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   return 0;
   __pyx_L1_error:;
   return -1;
@@ -9788,149 +9407,137 @@ static int __Pyx_InitCachedConstants(void) {
   __Pyx_RefNannyDeclarations
   __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":140
+  /* "skbio/alignment/_ssw_wrapper.pyx":125
  *             align_len = len(query)
  *             if align_len > 13:
  *                 target = target[:10] + "..."             # <<<<<<<<<<<<<<
  *                 query = query[:10] + "..."
  * 
  */
-  __pyx_slice__2 = PySlice_New(Py_None, __pyx_int_10, Py_None); if (unlikely(!__pyx_slice__2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_slice__2 = PySlice_New(Py_None, __pyx_int_10, Py_None); if (unlikely(!__pyx_slice__2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_slice__2);
   __Pyx_GIVEREF(__pyx_slice__2);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":141
+  /* "skbio/alignment/_ssw_wrapper.pyx":126
  *             if align_len > 13:
  *                 target = target[:10] + "..."
  *                 query = query[:10] + "..."             # <<<<<<<<<<<<<<
  * 
  *             length = "Length: %d" % align_len
  */
-  __pyx_slice__4 = PySlice_New(Py_None, __pyx_int_10, Py_None); if (unlikely(!__pyx_slice__4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_slice__4 = PySlice_New(Py_None, __pyx_int_10, Py_None); if (unlikely(!__pyx_slice__4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_slice__4);
   __Pyx_GIVEREF(__pyx_slice__4);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":386
+  /* "skbio/alignment/_ssw_wrapper.pyx":371
  *         # Save the original index scheme and then set it to 0 (1/2)
  *         orig_z_base = self.is_zero_based()
  *         self.set_zero_based(True)             # <<<<<<<<<<<<<<
  *         aligned_sequence = []
  *         seq = sequence[begin:end + 1]
  */
-  __pyx_tuple__7 = PyTuple_Pack(1, Py_True); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__7 = PyTuple_Pack(1, Py_True); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 371; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__7);
   __Pyx_GIVEREF(__pyx_tuple__7);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":577
+  /* "skbio/alignment/_ssw_wrapper.pyx":562
  *         self.read_sequence = query_sequence
  *         if gap_open_penalty <= 0:
  *             raise ValueError("`gap_open_penalty` must be > 0")             # <<<<<<<<<<<<<<
  *         self.gap_open_penalty = gap_open_penalty
  *         if gap_extend_penalty <= 0:
  */
-  __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_gap_open_penalty_must_be_0); if (unlikely(!__pyx_tuple__9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_gap_open_penalty_must_be_0); if (unlikely(!__pyx_tuple__9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__9);
   __Pyx_GIVEREF(__pyx_tuple__9);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":580
+  /* "skbio/alignment/_ssw_wrapper.pyx":565
  *         self.gap_open_penalty = gap_open_penalty
  *         if gap_extend_penalty <= 0:
  *             raise ValueError("`gap_extend_penalty` must be > 0")             # <<<<<<<<<<<<<<
  *         self.gap_extend_penalty = gap_extend_penalty
  *         self.distance_filter = 0 if distance_filter is None else \
  */
-  __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_gap_extend_penalty_must_be_0); if (unlikely(!__pyx_tuple__10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_gap_extend_penalty_must_be_0); if (unlikely(!__pyx_tuple__10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 565; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__10);
   __Pyx_GIVEREF(__pyx_tuple__10);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":595
+  /* "skbio/alignment/_ssw_wrapper.pyx":580
  *         if substitution_matrix is None:
  *             if protein:
  *                 raise Exception("Must provide a substitution matrix for"             # <<<<<<<<<<<<<<
  *                                 " protein sequences")
  *             matrix = self._build_match_matrix(match_score, mismatch_score)
  */
-  __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Must_provide_a_substitution_matr); if (unlikely(!__pyx_tuple__11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Must_provide_a_substitution_matr); if (unlikely(!__pyx_tuple__11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__11);
   __Pyx_GIVEREF(__pyx_tuple__11);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":215
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not C contiguous")             # <<<<<<<<<<<<<<
  * 
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  */
-  __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__12);
   __Pyx_GIVEREF(__pyx_tuple__12);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":219
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not Fortran contiguous")             # <<<<<<<<<<<<<<
  * 
  *             info.buf = PyArray_DATA(self)
  */
-  __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__13)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__13)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__13);
   __Pyx_GIVEREF(__pyx_tuple__13);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260
  *                 if ((descr.byteorder == c'>' and little_endian) or
  *                     (descr.byteorder == c'<' and not little_endian)):
  *                     raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"
  */
-  __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__14)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__14)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__14);
   __Pyx_GIVEREF(__pyx_tuple__14);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
  * 
  *         if (end - f) - <int>(new_offset - offset[0]) < 15:
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")             # <<<<<<<<<<<<<<
  * 
  *         if ((child.byteorder == c'>' and little_endian) or
  */
-  __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__15)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__15)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__15);
   __Pyx_GIVEREF(__pyx_tuple__15);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":806
  *         if ((child.byteorder == c'>' and little_endian) or
  *             (child.byteorder == c'<' and not little_endian)):
  *             raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *             # One could encode it in the format string and have Cython
  *             # complain instead, BUT: < and > in format strings also imply
  */
-  __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__16)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__16)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__16);
   __Pyx_GIVEREF(__pyx_tuple__16);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826
  *             t = child.type_num
  *             if end - f < 5:
  *                 raise RuntimeError(u"Format string allocated too short.")             # <<<<<<<<<<<<<<
  * 
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  */
-  __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__17)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__17)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__17);
   __Pyx_GIVEREF(__pyx_tuple__17);
-
-  /* "skbio/alignment/_ssw_wrapper.pyx":731
- * 
- * 
- * def local_pairwise_align_ssw(sequence1, sequence2,             # <<<<<<<<<<<<<<
- *                              **kwargs):
- *     """Align query and target sequences with Striped Smith-Waterman.
- */
-  __pyx_tuple__18 = PyTuple_Pack(7, __pyx_n_s_sequence1, __pyx_n_s_sequence2, __pyx_n_s_kwargs, __pyx_n_s_query, __pyx_n_s_alignment, __pyx_n_s_start_end, __pyx_n_s_seqs); if (unlikely(!__pyx_tuple__18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_tuple__18);
-  __Pyx_GIVEREF(__pyx_tuple__18);
-  __pyx_codeobj__19 = (PyObject*)__Pyx_PyCode_New(2, 0, 7, 0, CO_VARKEYWORDS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__18, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Users_jairideout_dev_scikit_bio, __pyx_n_s_local_pairwise_align_ssw, 731, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_RefNannyFinishContext();
   return 0;
   __pyx_L1_error:;
@@ -9982,6 +9589,8 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
   PyObject *__pyx_t_1 = NULL;
   PyObject *__pyx_t_2 = NULL;
   PyObject *__pyx_t_3 = NULL;
+  PyObject *__pyx_t_4 = NULL;
+  PyObject *__pyx_t_5 = NULL;
   int __pyx_lineno = 0;
   const char *__pyx_filename = NULL;
   int __pyx_clineno = 0;
@@ -10017,25 +9626,25 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
   #endif
   /*--- Module creation code ---*/
   #if PY_MAJOR_VERSION < 3
-  __pyx_m = Py_InitModule4(__Pyx_NAMESTR("_ssw_wrapper"), __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
+  __pyx_m = Py_InitModule4("_ssw_wrapper", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
   #else
   __pyx_m = PyModule_Create(&__pyx_moduledef);
   #endif
   if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   Py_INCREF(__pyx_d);
-  __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #if CYTHON_COMPILING_IN_PYPY
   Py_INCREF(__pyx_b);
   #endif
-  if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+  if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
   /*--- Initialize various global constants etc. ---*/
   if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
   if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #endif
   if (__pyx_module_is_main_skbio__alignment___ssw_wrapper) {
-    if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
   }
   #if PY_MAJOR_VERSION >= 3
   {
@@ -10058,17 +9667,17 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
   if (PyType_Ready(&__pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure.tp_print = 0;
   if (__Pyx_SetVtable(__pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure.tp_dict, __pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  if (__Pyx_SetAttrString(__pyx_m, "AlignmentStructure", (PyObject *)&__pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (PyObject_SetAttrString(__pyx_m, "AlignmentStructure", (PyObject *)&__pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure = &__pyx_type_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure;
   __pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman = &__pyx_vtable_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman;
   __pyx_vtable_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman._seq_converter = (PyArrayObject *(*)(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *, PyObject *))__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman__seq_converter;
   __pyx_vtable_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman._build_match_matrix = (PyArrayObject *(*)(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *, PyObject *, PyObject *))__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman__build_match_matrix;
   __pyx_vtable_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman._convert_dict2d_to_matrix = (PyArrayObject *(*)(struct __pyx_obj_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman *, PyObject *))__pyx_f_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman__convert_dict2d_to_matrix;
-  if (PyType_Ready(&__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (PyType_Ready(&__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 401; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman.tp_print = 0;
   #if CYTHON_COMPILING_IN_CPYTHON
   {
-    PyObject *wrapper = __Pyx_GetAttrString((PyObject *)&__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman, "__call__"); if (unlikely(!wrapper)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    PyObject *wrapper = PyObject_GetAttrString((PyObject *)&__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman, "__call__"); if (unlikely(!wrapper)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 401; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) {
       __pyx_wrapperbase_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_2__call__ = *((PyWrapperDescrObject *)wrapper)->d_base;
       __pyx_wrapperbase_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_2__call__.doc = __pyx_doc_5skbio_9alignment_12_ssw_wrapper_20StripedSmithWaterman_2__call__;
@@ -10076,8 +9685,8 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
     }
   }
   #endif
-  if (__Pyx_SetVtable(__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman.tp_dict, __pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  if (__Pyx_SetAttrString(__pyx_m, "StripedSmithWaterman", (PyObject *)&__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (__Pyx_SetVtable(__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman.tp_dict, __pyx_vtabptr_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 401; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (PyObject_SetAttrString(__pyx_m, "StripedSmithWaterman", (PyObject *)&__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 401; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_ptype_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman = &__pyx_type_5skbio_9alignment_12_ssw_wrapper_StripedSmithWaterman;
   /*--- Type import code ---*/
   __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", 
@@ -10090,10 +9699,10 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
   __pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), 0); if (unlikely(!__pyx_ptype_7cpython_4bool_bool)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 8; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), 0); if (unlikely(!__pyx_ptype_7cpython_7complex_complex)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 864; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   /*--- Variable import code ---*/
   /*--- Function import code ---*/
   /*--- Execution code ---*/
@@ -10114,14 +9723,14 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
  * import numpy as np
  * cimport numpy as cnp
  * from skbio.alignment import Alignment             # <<<<<<<<<<<<<<
- * from skbio.sequence import ProteinSequence, NucleotideSequence
+ * from skbio.sequence import Protein, Sequence
  * 
  */
   __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_INCREF(__pyx_n_s_Alignment);
-  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_Alignment);
   __Pyx_GIVEREF(__pyx_n_s_Alignment);
+  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_Alignment);
   __pyx_t_2 = __Pyx_Import(__pyx_n_s_skbio_alignment, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
@@ -10134,28 +9743,28 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
   /* "skbio/alignment/_ssw_wrapper.pyx":13
  * cimport numpy as cnp
  * from skbio.alignment import Alignment
- * from skbio.sequence import ProteinSequence, NucleotideSequence             # <<<<<<<<<<<<<<
+ * from skbio.sequence import Protein, Sequence             # <<<<<<<<<<<<<<
  * 
  * cdef extern from "_lib/ssw.h":
  */
   __pyx_t_2 = PyList_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_INCREF(__pyx_n_s_ProteinSequence);
-  PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_ProteinSequence);
-  __Pyx_GIVEREF(__pyx_n_s_ProteinSequence);
-  __Pyx_INCREF(__pyx_n_s_NucleotideSequence);
-  PyList_SET_ITEM(__pyx_t_2, 1, __pyx_n_s_NucleotideSequence);
-  __Pyx_GIVEREF(__pyx_n_s_NucleotideSequence);
+  __Pyx_INCREF(__pyx_n_s_Protein);
+  __Pyx_GIVEREF(__pyx_n_s_Protein);
+  PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_Protein);
+  __Pyx_INCREF(__pyx_n_s_Sequence);
+  __Pyx_GIVEREF(__pyx_n_s_Sequence);
+  PyList_SET_ITEM(__pyx_t_2, 1, __pyx_n_s_Sequence);
   __pyx_t_1 = __Pyx_Import(__pyx_n_s_skbio_sequence, __pyx_t_2, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_ProteinSequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_Protein); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_ProteinSequence, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_Protein, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_NucleotideSequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_Sequence); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_NucleotideSequence, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_Sequence, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
@@ -10166,405 +9775,422 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
  *     23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
  *     23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
  */
-  __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_array); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  __pyx_t_2 = PyList_New(128); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = PyList_New(128); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 0, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 1, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 1, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 2, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 2, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 3, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 3, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 4, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 4, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 5, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 5, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 6, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 6, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 7, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 7, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 8, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 8, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 9, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 9, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 10, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 10, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 11, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 11, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 12, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 12, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 13, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 13, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 14, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 14, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 15, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 15, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 16, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 16, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 17, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 17, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 18, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 18, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 19, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 19, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 20, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 20, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 21, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 21, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 22, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 22, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 23, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 23, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 24, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 24, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 25, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 25, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 26, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 26, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 27, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 27, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 28, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 28, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 29, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 29, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 30, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 30, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 31, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 31, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 32, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 32, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 33, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 33, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 34, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 34, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 35, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 35, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 36, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 36, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 37, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 37, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 38, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 38, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 39, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 39, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 40, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 40, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 41, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 41, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 42, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 42, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 43, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 43, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 44, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 44, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 45, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 45, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 46, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 46, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 47, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 47, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 48, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 48, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 49, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 49, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 50, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 50, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 51, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 51, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 52, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 52, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 53, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 53, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 54, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 54, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 55, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 55, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 56, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 56, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 57, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 57, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 58, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 58, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 59, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 59, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 60, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 60, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 61, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 61, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 62, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 62, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 63, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 63, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 64, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 64, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_0);
-  PyList_SET_ITEM(__pyx_t_1, 65, __pyx_int_0);
   __Pyx_GIVEREF(__pyx_int_0);
+  PyList_SET_ITEM(__pyx_t_2, 65, __pyx_int_0);
   __Pyx_INCREF(__pyx_int_20);
-  PyList_SET_ITEM(__pyx_t_1, 66, __pyx_int_20);
   __Pyx_GIVEREF(__pyx_int_20);
+  PyList_SET_ITEM(__pyx_t_2, 66, __pyx_int_20);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 67, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_2, 67, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_3);
-  PyList_SET_ITEM(__pyx_t_1, 68, __pyx_int_3);
   __Pyx_GIVEREF(__pyx_int_3);
+  PyList_SET_ITEM(__pyx_t_2, 68, __pyx_int_3);
   __Pyx_INCREF(__pyx_int_6);
-  PyList_SET_ITEM(__pyx_t_1, 69, __pyx_int_6);
   __Pyx_GIVEREF(__pyx_int_6);
+  PyList_SET_ITEM(__pyx_t_2, 69, __pyx_int_6);
   __Pyx_INCREF(__pyx_int_13);
-  PyList_SET_ITEM(__pyx_t_1, 70, __pyx_int_13);
   __Pyx_GIVEREF(__pyx_int_13);
+  PyList_SET_ITEM(__pyx_t_2, 70, __pyx_int_13);
   __Pyx_INCREF(__pyx_int_7);
-  PyList_SET_ITEM(__pyx_t_1, 71, __pyx_int_7);
   __Pyx_GIVEREF(__pyx_int_7);
+  PyList_SET_ITEM(__pyx_t_2, 71, __pyx_int_7);
   __Pyx_INCREF(__pyx_int_8);
-  PyList_SET_ITEM(__pyx_t_1, 72, __pyx_int_8);
   __Pyx_GIVEREF(__pyx_int_8);
+  PyList_SET_ITEM(__pyx_t_2, 72, __pyx_int_8);
   __Pyx_INCREF(__pyx_int_9);
-  PyList_SET_ITEM(__pyx_t_1, 73, __pyx_int_9);
   __Pyx_GIVEREF(__pyx_int_9);
+  PyList_SET_ITEM(__pyx_t_2, 73, __pyx_int_9);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 74, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 74, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_11);
-  PyList_SET_ITEM(__pyx_t_1, 75, __pyx_int_11);
   __Pyx_GIVEREF(__pyx_int_11);
+  PyList_SET_ITEM(__pyx_t_2, 75, __pyx_int_11);
   __Pyx_INCREF(__pyx_int_10);
-  PyList_SET_ITEM(__pyx_t_1, 76, __pyx_int_10);
   __Pyx_GIVEREF(__pyx_int_10);
+  PyList_SET_ITEM(__pyx_t_2, 76, __pyx_int_10);
   __Pyx_INCREF(__pyx_int_12);
-  PyList_SET_ITEM(__pyx_t_1, 77, __pyx_int_12);
   __Pyx_GIVEREF(__pyx_int_12);
+  PyList_SET_ITEM(__pyx_t_2, 77, __pyx_int_12);
   __Pyx_INCREF(__pyx_int_2);
-  PyList_SET_ITEM(__pyx_t_1, 78, __pyx_int_2);
   __Pyx_GIVEREF(__pyx_int_2);
+  PyList_SET_ITEM(__pyx_t_2, 78, __pyx_int_2);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 79, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 79, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_14);
-  PyList_SET_ITEM(__pyx_t_1, 80, __pyx_int_14);
   __Pyx_GIVEREF(__pyx_int_14);
+  PyList_SET_ITEM(__pyx_t_2, 80, __pyx_int_14);
   __Pyx_INCREF(__pyx_int_5);
-  PyList_SET_ITEM(__pyx_t_1, 81, __pyx_int_5);
   __Pyx_GIVEREF(__pyx_int_5);
+  PyList_SET_ITEM(__pyx_t_2, 81, __pyx_int_5);
   __Pyx_INCREF(__pyx_int_1);
-  PyList_SET_ITEM(__pyx_t_1, 82, __pyx_int_1);
   __Pyx_GIVEREF(__pyx_int_1);
+  PyList_SET_ITEM(__pyx_t_2, 82, __pyx_int_1);
   __Pyx_INCREF(__pyx_int_15);
-  PyList_SET_ITEM(__pyx_t_1, 83, __pyx_int_15);
   __Pyx_GIVEREF(__pyx_int_15);
+  PyList_SET_ITEM(__pyx_t_2, 83, __pyx_int_15);
   __Pyx_INCREF(__pyx_int_16);
-  PyList_SET_ITEM(__pyx_t_1, 84, __pyx_int_16);
   __Pyx_GIVEREF(__pyx_int_16);
+  PyList_SET_ITEM(__pyx_t_2, 84, __pyx_int_16);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 85, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 85, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_19);
-  PyList_SET_ITEM(__pyx_t_1, 86, __pyx_int_19);
   __Pyx_GIVEREF(__pyx_int_19);
+  PyList_SET_ITEM(__pyx_t_2, 86, __pyx_int_19);
   __Pyx_INCREF(__pyx_int_17);
-  PyList_SET_ITEM(__pyx_t_1, 87, __pyx_int_17);
   __Pyx_GIVEREF(__pyx_int_17);
+  PyList_SET_ITEM(__pyx_t_2, 87, __pyx_int_17);
   __Pyx_INCREF(__pyx_int_22);
-  PyList_SET_ITEM(__pyx_t_1, 88, __pyx_int_22);
   __Pyx_GIVEREF(__pyx_int_22);
+  PyList_SET_ITEM(__pyx_t_2, 88, __pyx_int_22);
   __Pyx_INCREF(__pyx_int_18);
-  PyList_SET_ITEM(__pyx_t_1, 89, __pyx_int_18);
   __Pyx_GIVEREF(__pyx_int_18);
+  PyList_SET_ITEM(__pyx_t_2, 89, __pyx_int_18);
   __Pyx_INCREF(__pyx_int_21);
-  PyList_SET_ITEM(__pyx_t_1, 90, __pyx_int_21);
   __Pyx_GIVEREF(__pyx_int_21);
+  PyList_SET_ITEM(__pyx_t_2, 90, __pyx_int_21);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 91, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 91, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 92, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 92, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 93, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 93, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 94, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 94, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 95, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 95, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 96, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 96, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_0);
-  PyList_SET_ITEM(__pyx_t_1, 97, __pyx_int_0);
   __Pyx_GIVEREF(__pyx_int_0);
+  PyList_SET_ITEM(__pyx_t_2, 97, __pyx_int_0);
   __Pyx_INCREF(__pyx_int_20);
-  PyList_SET_ITEM(__pyx_t_1, 98, __pyx_int_20);
   __Pyx_GIVEREF(__pyx_int_20);
+  PyList_SET_ITEM(__pyx_t_2, 98, __pyx_int_20);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 99, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_2, 99, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_3);
-  PyList_SET_ITEM(__pyx_t_1, 100, __pyx_int_3);
   __Pyx_GIVEREF(__pyx_int_3);
+  PyList_SET_ITEM(__pyx_t_2, 100, __pyx_int_3);
   __Pyx_INCREF(__pyx_int_6);
-  PyList_SET_ITEM(__pyx_t_1, 101, __pyx_int_6);
   __Pyx_GIVEREF(__pyx_int_6);
+  PyList_SET_ITEM(__pyx_t_2, 101, __pyx_int_6);
   __Pyx_INCREF(__pyx_int_13);
-  PyList_SET_ITEM(__pyx_t_1, 102, __pyx_int_13);
   __Pyx_GIVEREF(__pyx_int_13);
+  PyList_SET_ITEM(__pyx_t_2, 102, __pyx_int_13);
   __Pyx_INCREF(__pyx_int_7);
-  PyList_SET_ITEM(__pyx_t_1, 103, __pyx_int_7);
   __Pyx_GIVEREF(__pyx_int_7);
+  PyList_SET_ITEM(__pyx_t_2, 103, __pyx_int_7);
   __Pyx_INCREF(__pyx_int_8);
-  PyList_SET_ITEM(__pyx_t_1, 104, __pyx_int_8);
   __Pyx_GIVEREF(__pyx_int_8);
+  PyList_SET_ITEM(__pyx_t_2, 104, __pyx_int_8);
   __Pyx_INCREF(__pyx_int_9);
-  PyList_SET_ITEM(__pyx_t_1, 105, __pyx_int_9);
   __Pyx_GIVEREF(__pyx_int_9);
+  PyList_SET_ITEM(__pyx_t_2, 105, __pyx_int_9);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 106, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 106, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_11);
-  PyList_SET_ITEM(__pyx_t_1, 107, __pyx_int_11);
   __Pyx_GIVEREF(__pyx_int_11);
+  PyList_SET_ITEM(__pyx_t_2, 107, __pyx_int_11);
   __Pyx_INCREF(__pyx_int_10);
-  PyList_SET_ITEM(__pyx_t_1, 108, __pyx_int_10);
   __Pyx_GIVEREF(__pyx_int_10);
+  PyList_SET_ITEM(__pyx_t_2, 108, __pyx_int_10);
   __Pyx_INCREF(__pyx_int_12);
-  PyList_SET_ITEM(__pyx_t_1, 109, __pyx_int_12);
   __Pyx_GIVEREF(__pyx_int_12);
+  PyList_SET_ITEM(__pyx_t_2, 109, __pyx_int_12);
   __Pyx_INCREF(__pyx_int_2);
-  PyList_SET_ITEM(__pyx_t_1, 110, __pyx_int_2);
   __Pyx_GIVEREF(__pyx_int_2);
+  PyList_SET_ITEM(__pyx_t_2, 110, __pyx_int_2);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 111, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 111, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_14);
-  PyList_SET_ITEM(__pyx_t_1, 112, __pyx_int_14);
   __Pyx_GIVEREF(__pyx_int_14);
+  PyList_SET_ITEM(__pyx_t_2, 112, __pyx_int_14);
   __Pyx_INCREF(__pyx_int_5);
-  PyList_SET_ITEM(__pyx_t_1, 113, __pyx_int_5);
   __Pyx_GIVEREF(__pyx_int_5);
+  PyList_SET_ITEM(__pyx_t_2, 113, __pyx_int_5);
   __Pyx_INCREF(__pyx_int_1);
-  PyList_SET_ITEM(__pyx_t_1, 114, __pyx_int_1);
   __Pyx_GIVEREF(__pyx_int_1);
+  PyList_SET_ITEM(__pyx_t_2, 114, __pyx_int_1);
   __Pyx_INCREF(__pyx_int_15);
-  PyList_SET_ITEM(__pyx_t_1, 115, __pyx_int_15);
   __Pyx_GIVEREF(__pyx_int_15);
+  PyList_SET_ITEM(__pyx_t_2, 115, __pyx_int_15);
   __Pyx_INCREF(__pyx_int_16);
-  PyList_SET_ITEM(__pyx_t_1, 116, __pyx_int_16);
   __Pyx_GIVEREF(__pyx_int_16);
+  PyList_SET_ITEM(__pyx_t_2, 116, __pyx_int_16);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 117, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 117, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_19);
-  PyList_SET_ITEM(__pyx_t_1, 118, __pyx_int_19);
   __Pyx_GIVEREF(__pyx_int_19);
+  PyList_SET_ITEM(__pyx_t_2, 118, __pyx_int_19);
   __Pyx_INCREF(__pyx_int_17);
-  PyList_SET_ITEM(__pyx_t_1, 119, __pyx_int_17);
   __Pyx_GIVEREF(__pyx_int_17);
+  PyList_SET_ITEM(__pyx_t_2, 119, __pyx_int_17);
   __Pyx_INCREF(__pyx_int_22);
-  PyList_SET_ITEM(__pyx_t_1, 120, __pyx_int_22);
   __Pyx_GIVEREF(__pyx_int_22);
+  PyList_SET_ITEM(__pyx_t_2, 120, __pyx_int_22);
   __Pyx_INCREF(__pyx_int_18);
-  PyList_SET_ITEM(__pyx_t_1, 121, __pyx_int_18);
   __Pyx_GIVEREF(__pyx_int_18);
+  PyList_SET_ITEM(__pyx_t_2, 121, __pyx_int_18);
   __Pyx_INCREF(__pyx_int_21);
-  PyList_SET_ITEM(__pyx_t_1, 122, __pyx_int_21);
   __Pyx_GIVEREF(__pyx_int_21);
+  PyList_SET_ITEM(__pyx_t_2, 122, __pyx_int_21);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 123, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 123, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 124, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 124, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 125, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 125, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 126, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
+  PyList_SET_ITEM(__pyx_t_2, 126, __pyx_int_23);
   __Pyx_INCREF(__pyx_int_23);
-  PyList_SET_ITEM(__pyx_t_1, 127, __pyx_int_23);
   __Pyx_GIVEREF(__pyx_int_23);
-  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
-  __Pyx_GIVEREF(__pyx_t_1);
-  __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  PyList_SET_ITEM(__pyx_t_2, 127, __pyx_int_23);
+  __pyx_t_4 = NULL;
+  if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) {
+    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
+    if (likely(__pyx_t_4)) {
+      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
+      __Pyx_INCREF(__pyx_t_4);
+      __Pyx_INCREF(function);
+      __Pyx_DECREF_SET(__pyx_t_3, function);
+    }
+  }
+  if (!__pyx_t_4) {
+    __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+    __Pyx_GOTREF(__pyx_t_1);
+  } else {
+    __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_5);
+    __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = NULL;
+    __Pyx_GIVEREF(__pyx_t_2);
+    PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_2);
+    __pyx_t_2 = 0;
+    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+  }
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_np_aa_table, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
@@ -10576,406 +10202,423 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
  *     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
  *     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
  */
-  __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_5);
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  __pyx_t_3 = PyList_New(128); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = PyList_New(128); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 0, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 1, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 1, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 2, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 2, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 3, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 3, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 4, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 4, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 5, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 5, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 6, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 6, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 7, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 7, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 8, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 8, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 9, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 9, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 10, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 10, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 11, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 11, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 12, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 12, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 13, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 13, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 14, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 14, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 15, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 15, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 16, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 16, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 17, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 17, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 18, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 18, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 19, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 19, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 20, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 20, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 21, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 21, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 22, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 22, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 23, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 23, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 24, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 24, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 25, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 25, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 26, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 26, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 27, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 27, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 28, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 28, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 29, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 29, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 30, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 30, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 31, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 31, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 32, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 32, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 33, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 33, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 34, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 34, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 35, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 35, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 36, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 36, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 37, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 37, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 38, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 38, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 39, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 39, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 40, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 40, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 41, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 41, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 42, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 42, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 43, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 43, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 44, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 44, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 45, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 45, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 46, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 46, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 47, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 47, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 48, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 48, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 49, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 49, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 50, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 50, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 51, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 51, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 52, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 52, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 53, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 53, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 54, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 54, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 55, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 55, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 56, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 56, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 57, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 57, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 58, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 58, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 59, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 59, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 60, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 60, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 61, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 61, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 62, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 62, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 63, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 63, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 64, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 64, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_0);
-  PyList_SET_ITEM(__pyx_t_1, 65, __pyx_int_0);
   __Pyx_GIVEREF(__pyx_int_0);
+  PyList_SET_ITEM(__pyx_t_3, 65, __pyx_int_0);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 66, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 66, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_1);
-  PyList_SET_ITEM(__pyx_t_1, 67, __pyx_int_1);
   __Pyx_GIVEREF(__pyx_int_1);
+  PyList_SET_ITEM(__pyx_t_3, 67, __pyx_int_1);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 68, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 68, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 69, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 69, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 70, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 70, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_2);
-  PyList_SET_ITEM(__pyx_t_1, 71, __pyx_int_2);
   __Pyx_GIVEREF(__pyx_int_2);
+  PyList_SET_ITEM(__pyx_t_3, 71, __pyx_int_2);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 72, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 72, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 73, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 73, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 74, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 74, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 75, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 75, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 76, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 76, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 77, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 77, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 78, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 78, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 79, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 79, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 80, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 80, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 81, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 81, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 82, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 82, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 83, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 83, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_3);
-  PyList_SET_ITEM(__pyx_t_1, 84, __pyx_int_3);
   __Pyx_GIVEREF(__pyx_int_3);
+  PyList_SET_ITEM(__pyx_t_3, 84, __pyx_int_3);
   __Pyx_INCREF(__pyx_int_0);
-  PyList_SET_ITEM(__pyx_t_1, 85, __pyx_int_0);
   __Pyx_GIVEREF(__pyx_int_0);
+  PyList_SET_ITEM(__pyx_t_3, 85, __pyx_int_0);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 86, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 86, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 87, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 87, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 88, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 88, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 89, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 89, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 90, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 90, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 91, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 91, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 92, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 92, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 93, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 93, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 94, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 94, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 95, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 95, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 96, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 96, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_0);
-  PyList_SET_ITEM(__pyx_t_1, 97, __pyx_int_0);
   __Pyx_GIVEREF(__pyx_int_0);
+  PyList_SET_ITEM(__pyx_t_3, 97, __pyx_int_0);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 98, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 98, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_1);
-  PyList_SET_ITEM(__pyx_t_1, 99, __pyx_int_1);
   __Pyx_GIVEREF(__pyx_int_1);
+  PyList_SET_ITEM(__pyx_t_3, 99, __pyx_int_1);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 100, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 100, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 101, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 101, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 102, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 102, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_2);
-  PyList_SET_ITEM(__pyx_t_1, 103, __pyx_int_2);
   __Pyx_GIVEREF(__pyx_int_2);
+  PyList_SET_ITEM(__pyx_t_3, 103, __pyx_int_2);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 104, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 104, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 105, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 105, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 106, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 106, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 107, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 107, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 108, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 108, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 109, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 109, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 110, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 110, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 111, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 111, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 112, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 112, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 113, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 113, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 114, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 114, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 115, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 115, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_3);
-  PyList_SET_ITEM(__pyx_t_1, 116, __pyx_int_3);
   __Pyx_GIVEREF(__pyx_int_3);
+  PyList_SET_ITEM(__pyx_t_3, 116, __pyx_int_3);
   __Pyx_INCREF(__pyx_int_0);
-  PyList_SET_ITEM(__pyx_t_1, 117, __pyx_int_0);
   __Pyx_GIVEREF(__pyx_int_0);
+  PyList_SET_ITEM(__pyx_t_3, 117, __pyx_int_0);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 118, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 118, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 119, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 119, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 120, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 120, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 121, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 121, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 122, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 122, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 123, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 123, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 124, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 124, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 125, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 125, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 126, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
+  PyList_SET_ITEM(__pyx_t_3, 126, __pyx_int_4);
   __Pyx_INCREF(__pyx_int_4);
-  PyList_SET_ITEM(__pyx_t_1, 127, __pyx_int_4);
   __Pyx_GIVEREF(__pyx_int_4);
-  __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_2);
-  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
-  __Pyx_GIVEREF(__pyx_t_1);
-  __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  PyList_SET_ITEM(__pyx_t_3, 127, __pyx_int_4);
+  __pyx_t_2 = NULL;
+  if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_5))) {
+    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_5);
+    if (likely(__pyx_t_2)) {
+      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
+      __Pyx_INCREF(__pyx_t_2);
+      __Pyx_INCREF(function);
+      __Pyx_DECREF_SET(__pyx_t_5, function);
+    }
+  }
+  if (!__pyx_t_2) {
+    __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+    __Pyx_GOTREF(__pyx_t_1);
+  } else {
+    __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __pyx_t_2 = NULL;
+    __Pyx_GIVEREF(__pyx_t_3);
+    PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_3);
+    __pyx_t_3 = 0;
+    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  }
+  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_np_nt_table, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
@@ -10986,394 +10629,399 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
  * 
  * 
  */
-  __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_array); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = PyList_New(3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_5);
+  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_array); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
+  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+  __pyx_t_5 = PyList_New(3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_5);
   __Pyx_INCREF(__pyx_n_s_M);
-  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_M);
   __Pyx_GIVEREF(__pyx_n_s_M);
+  PyList_SET_ITEM(__pyx_t_5, 0, __pyx_n_s_M);
   __Pyx_INCREF(__pyx_n_s_I);
-  PyList_SET_ITEM(__pyx_t_1, 1, __pyx_n_s_I);
   __Pyx_GIVEREF(__pyx_n_s_I);
+  PyList_SET_ITEM(__pyx_t_5, 1, __pyx_n_s_I);
   __Pyx_INCREF(__pyx_n_s_D);
-  PyList_SET_ITEM(__pyx_t_1, 2, __pyx_n_s_D);
   __Pyx_GIVEREF(__pyx_n_s_D);
-  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
-  __Pyx_GIVEREF(__pyx_t_1);
-  __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  PyList_SET_ITEM(__pyx_t_5, 2, __pyx_n_s_D);
+  __pyx_t_3 = NULL;
+  if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_4))) {
+    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
+    if (likely(__pyx_t_3)) {
+      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
+      __Pyx_INCREF(__pyx_t_3);
+      __Pyx_INCREF(function);
+      __Pyx_DECREF_SET(__pyx_t_4, function);
+    }
+  }
+  if (!__pyx_t_3) {
+    __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+    __Pyx_GOTREF(__pyx_t_1);
+  } else {
+    __pyx_t_2 = PyTuple_New(1+1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
+    __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); __pyx_t_3 = NULL;
+    __Pyx_GIVEREF(__pyx_t_5);
+    PyTuple_SET_ITEM(__pyx_t_2, 0+1, __pyx_t_5);
+    __pyx_t_5 = 0;
+    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  }
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_mid_table, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":148
+  /* "skbio/alignment/_ssw_wrapper.pyx":133
  * 
  *     @property
  *     def optimal_alignment_score(self):             # <<<<<<<<<<<<<<
  *         """Optimal alignment score
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_optimal_alignment_score); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_optimal_alignment_score); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":147
+  /* "skbio/alignment/_ssw_wrapper.pyx":132
  *         return score
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def optimal_alignment_score(self):
  *         """Optimal alignment score
  */
-  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
   __Pyx_GIVEREF(__pyx_t_1);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
   __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_optimal_alignment_score, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_optimal_alignment_score, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":160
+  /* "skbio/alignment/_ssw_wrapper.pyx":145
  * 
  *     @property
  *     def suboptimal_alignment_score(self):             # <<<<<<<<<<<<<<
  *         """Suboptimal alignment score
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_suboptimal_alignment_score); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_suboptimal_alignment_score); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":159
+  /* "skbio/alignment/_ssw_wrapper.pyx":144
  *         return self.p.score1
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def suboptimal_alignment_score(self):
  *         """Suboptimal alignment score
  */
-  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
   __Pyx_GIVEREF(__pyx_t_1);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
   __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_suboptimal_alignment_score, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_suboptimal_alignment_score, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":172
+  /* "skbio/alignment/_ssw_wrapper.pyx":157
  * 
  *     @property
  *     def target_begin(self):             # <<<<<<<<<<<<<<
  *         """Character index where the target's alignment begins
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_begin); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_begin); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":171
+  /* "skbio/alignment/_ssw_wrapper.pyx":156
  *         return self.p.score2
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def target_begin(self):
  *         """Character index where the target's alignment begins
  */
-  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
   __Pyx_GIVEREF(__pyx_t_1);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
   __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_begin, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_begin, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":189
+  /* "skbio/alignment/_ssw_wrapper.pyx":174
  * 
  *     @property
  *     def target_end_optimal(self):             # <<<<<<<<<<<<<<
  *         """Character index where the target's optimal alignment ends
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_end_optimal); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_end_optimal); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":188
+  /* "skbio/alignment/_ssw_wrapper.pyx":173
  *                                                             >= 0) else -1
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def target_end_optimal(self):
  *         """Character index where the target's optimal alignment ends
  */
-  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
   __Pyx_GIVEREF(__pyx_t_1);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
   __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_end_optimal, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_end_optimal, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":206
+  /* "skbio/alignment/_ssw_wrapper.pyx":191
  * 
  *     @property
  *     def target_end_suboptimal(self):             # <<<<<<<<<<<<<<
  *         """Character index where the target's suboptimal alignment ends
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_end_suboptimal); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 206; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_end_suboptimal); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":205
+  /* "skbio/alignment/_ssw_wrapper.pyx":190
  *         return self.p.ref_end1 + self.index_starts_at
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def target_end_suboptimal(self):
  *         """Character index where the target's suboptimal alignment ends
  */
-  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
   __Pyx_GIVEREF(__pyx_t_1);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
   __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_end_suboptimal, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 206; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_end_suboptimal, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":223
+  /* "skbio/alignment/_ssw_wrapper.pyx":208
  * 
  *     @property
  *     def query_begin(self):             # <<<<<<<<<<<<<<
  *         """Returns the character index at which the query sequence begins
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_query_begin); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_query_begin); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":222
+  /* "skbio/alignment/_ssw_wrapper.pyx":207
  *         return self.p.ref_end2 + self.index_starts_at
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def query_begin(self):
  *         """Returns the character index at which the query sequence begins
  */
-  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
   __Pyx_GIVEREF(__pyx_t_1);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
   __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_query_begin, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_query_begin, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":240
+  /* "skbio/alignment/_ssw_wrapper.pyx":225
  * 
  *     @property
  *     def query_end(self):             # <<<<<<<<<<<<<<
  *         """Character index at where query sequence ends
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_query_end); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 240; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_query_end); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 225; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":239
+  /* "skbio/alignment/_ssw_wrapper.pyx":224
  *                                                              >= 0) else -1
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def query_end(self):
  *         """Character index at where query sequence ends
  */
-  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
   __Pyx_GIVEREF(__pyx_t_1);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
   __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_query_end, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 240; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_query_end, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 225; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":256
+  /* "skbio/alignment/_ssw_wrapper.pyx":241
  * 
  *     @property
  *     def cigar(self):             # <<<<<<<<<<<<<<
  *         """Cigar formatted string for the optimal alignment
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_cigar); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 256; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_cigar); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":255
+  /* "skbio/alignment/_ssw_wrapper.pyx":240
  *         return self.p.read_end1 + self.index_starts_at
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def cigar(self):
  *         """Cigar formatted string for the optimal alignment
  */
-  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 240; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
   __Pyx_GIVEREF(__pyx_t_1);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
   __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 240; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_cigar, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 256; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_cigar, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":294
+  /* "skbio/alignment/_ssw_wrapper.pyx":279
  * 
  *     @property
  *     def query_sequence(self):             # <<<<<<<<<<<<<<
  *         """Query sequence
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 294; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_query_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":293
+  /* "skbio/alignment/_ssw_wrapper.pyx":278
  *         return self._cigar_string
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def query_sequence(self):
  *         """Query sequence
  */
-  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 293; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
   __Pyx_GIVEREF(__pyx_t_1);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
   __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 293; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_query_sequence, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 294; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_query_sequence, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":306
+  /* "skbio/alignment/_ssw_wrapper.pyx":291
  * 
  *     @property
  *     def target_sequence(self):             # <<<<<<<<<<<<<<
  *         """Target sequence
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 306; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_target_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 291; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":305
+  /* "skbio/alignment/_ssw_wrapper.pyx":290
  *         return self.read_sequence
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def target_sequence(self):
  *         """Target sequence
  */
-  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 305; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
   __Pyx_GIVEREF(__pyx_t_1);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
   __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 305; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_sequence, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 306; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_target_sequence, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 291; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":318
+  /* "skbio/alignment/_ssw_wrapper.pyx":303
  * 
  *     @property
  *     def aligned_query_sequence(self):             # <<<<<<<<<<<<<<
  *         """Returns the query sequence aligned by the cigar
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_aligned_query_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_aligned_query_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 303; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":317
+  /* "skbio/alignment/_ssw_wrapper.pyx":302
  *         return self.reference_sequence
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def aligned_query_sequence(self):
  *         """Returns the query sequence aligned by the cigar
  */
-  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 302; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
   __Pyx_GIVEREF(__pyx_t_1);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
   __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 302; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_aligned_query_sequence, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_aligned_query_sequence, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 303; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":340
+  /* "skbio/alignment/_ssw_wrapper.pyx":325
  * 
  *     @property
  *     def aligned_target_sequence(self):             # <<<<<<<<<<<<<<
  *         """Returns the target sequence aligned by the cigar
  * 
  */
-  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_aligned_target_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetNameInClass((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure, __pyx_n_s_aligned_target_sequence); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 325; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":339
+  /* "skbio/alignment/_ssw_wrapper.pyx":324
  *         return None
  * 
  *     @property             # <<<<<<<<<<<<<<
  *     def aligned_target_sequence(self):
  *         """Returns the target sequence aligned by the cigar
  */
-  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 324; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_4);
   __Pyx_GIVEREF(__pyx_t_1);
+  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
   __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_property, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 324; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_aligned_target_sequence, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  if (PyDict_SetItem((PyObject *)__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure->tp_dict, __pyx_n_s_aligned_target_sequence, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 325; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   PyType_Modified(__pyx_ptype_5skbio_9alignment_12_ssw_wrapper_AlignmentStructure);
 
-  /* "skbio/alignment/_ssw_wrapper.pyx":731
- * 
- * 
- * def local_pairwise_align_ssw(sequence1, sequence2,             # <<<<<<<<<<<<<<
- *                              **kwargs):
- *     """Align query and target sequences with Striped Smith-Waterman.
- */
-  __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5skbio_9alignment_12_ssw_wrapper_1local_pairwise_align_ssw, NULL, __pyx_n_s_skbio_alignment__ssw_wrapper); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_local_pairwise_align_ssw, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 731; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
   /* "skbio/alignment/_ssw_wrapper.pyx":1
  * # -----------------------------------------------------------------------------             # <<<<<<<<<<<<<<
  * #  Copyright (c) 2013--, scikit-bio development team.
@@ -11384,20 +11032,27 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
+  /* "../../../../../.virtualenvs/skbio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":979
  *      arr.base = baseptr
  * 
  * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
  *     if arr.base is NULL:
  *         return None
  */
+
+  /*--- Wrapped vars code ---*/
+
   goto __pyx_L0;
   __pyx_L1_error:;
   __Pyx_XDECREF(__pyx_t_1);
   __Pyx_XDECREF(__pyx_t_2);
   __Pyx_XDECREF(__pyx_t_3);
+  __Pyx_XDECREF(__pyx_t_4);
+  __Pyx_XDECREF(__pyx_t_5);
   if (__pyx_m) {
-    __Pyx_AddTraceback("init skbio.alignment._ssw_wrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
+    if (__pyx_d) {
+      __Pyx_AddTraceback("init skbio.alignment._ssw_wrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
+    }
     Py_DECREF(__pyx_m); __pyx_m = 0;
   } else if (!PyErr_Occurred()) {
     PyErr_SetString(PyExc_ImportError, "init skbio.alignment._ssw_wrapper");
@@ -11411,7 +11066,7 @@ PyMODINIT_FUNC PyInit__ssw_wrapper(void)
   #endif
 }
 
-/* Runtime support code */
+/* --- Runtime support code --- */
 #if CYTHON_REFNANNY
 static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
     PyObject *m = NULL, *p = NULL;
@@ -11426,7 +11081,7 @@ end:
     Py_XDECREF(m);
     return (__Pyx_RefNannyAPIStruct *)r;
 }
-#endif /* CYTHON_REFNANNY */
+#endif
 
 static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
     PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
@@ -11604,14 +11259,10 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg
     ternaryfunc call = func->ob_type->tp_call;
     if (unlikely(!call))
         return PyObject_Call(func, arg, kw);
-#if PY_VERSION_HEX >= 0x02060000
     if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
         return NULL;
-#endif
     result = (*call)(func, arg, kw);
-#if PY_VERSION_HEX >= 0x02060000
     Py_LeaveRecursiveCall();
-#endif
     if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
         PyErr_SetString(
             PyExc_SystemError,
@@ -11621,8 +11272,8 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg
 }
 #endif
 
-static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
-        PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj,
+        Py_ssize_t cstart, Py_ssize_t cstop,
         PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice,
         int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) {
 #if CYTHON_COMPILING_IN_CPYTHON
@@ -11722,7 +11373,7 @@ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
     PyObject *result;
 #if CYTHON_COMPILING_IN_CPYTHON
     result = PyDict_GetItem(__pyx_d, name);
-    if (result) {
+    if (likely(result)) {
         Py_INCREF(result);
     } else {
 #else
@@ -11743,7 +11394,8 @@ static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j
     return r;
 }
 static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
-                                                              int wraparound, int boundscheck) {
+                                                              CYTHON_NCP_UNUSED int wraparound,
+                                                              CYTHON_NCP_UNUSED int boundscheck) {
 #if CYTHON_COMPILING_IN_CPYTHON
     if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o);
     if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) {
@@ -11757,7 +11409,8 @@ static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_
 #endif
 }
 static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
-                                                              int wraparound, int boundscheck) {
+                                                              CYTHON_NCP_UNUSED int wraparound,
+                                                              CYTHON_NCP_UNUSED int boundscheck) {
 #if CYTHON_COMPILING_IN_CPYTHON
     if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o);
     if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
@@ -11770,8 +11423,9 @@ static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize
     return PySequence_GetItem(o, i);
 #endif
 }
-static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
-                                                     int is_list, int wraparound, int boundscheck) {
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
+                                                     CYTHON_NCP_UNUSED int wraparound,
+                                                     CYTHON_NCP_UNUSED int boundscheck) {
 #if CYTHON_COMPILING_IN_CPYTHON
     if (is_list || PyList_CheckExact(o)) {
         Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
@@ -11813,6 +11467,70 @@ static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
     return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
 }
 
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
+    PyObject *self, *result;
+    PyCFunction cfunc;
+    cfunc = PyCFunction_GET_FUNCTION(func);
+    self = PyCFunction_GET_SELF(func);
+    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
+        return NULL;
+    result = cfunc(self, arg);
+    Py_LeaveRecursiveCall();
+    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
+        PyErr_SetString(
+            PyExc_SystemError,
+            "NULL result without error in PyObject_Call");
+    }
+    return result;
+}
+#endif
+
+#if CYTHON_COMPILING_IN_CPYTHON
+static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+    PyObject *result;
+    PyObject *args = PyTuple_New(1);
+    if (unlikely(!args)) return NULL;
+    Py_INCREF(arg);
+    PyTuple_SET_ITEM(args, 0, arg);
+    result = __Pyx_PyObject_Call(func, args, NULL);
+    Py_DECREF(args);
+    return result;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+#ifdef __Pyx_CyFunction_USED
+    if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) {
+#else
+    if (likely(PyCFunction_Check(func))) {
+#endif
+        if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
+            return __Pyx_PyObject_CallMethO(func, arg);
+        }
+    }
+    return __Pyx__PyObject_CallOneArg(func, arg);
+}
+#else
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+    PyObject* args = PyTuple_Pack(1, arg);
+    return (likely(args)) ? __Pyx_PyObject_Call(func, args, NULL) : NULL;
+}
+#endif
+
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
+#ifdef __Pyx_CyFunction_USED
+    if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) {
+#else
+    if (likely(PyCFunction_Check(func))) {
+#endif
+        if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
+            return __Pyx_PyObject_CallMethO(func, NULL);
+        }
+    }
+    return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
+}
+#endif
+
 static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
     PyErr_Format(PyExc_ValueError,
                  "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
@@ -11940,10 +11658,8 @@ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int
         Py_ssize_t length;
         int kind;
         void *data1, *data2;
-        #if CYTHON_PEP393_ENABLED
-        if (unlikely(PyUnicode_READY(s1) < 0) || unlikely(PyUnicode_READY(s2) < 0))
+        if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
             return -1;
-        #endif
         length = __Pyx_PyUnicode_GET_LENGTH(s1);
         if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
             goto return_ne;
@@ -12040,11 +11756,7 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
             goto raise_error;
         }
     }
-    #if PY_VERSION_HEX < 0x02050000
-    if (PyClass_Check(type)) {
-    #else
     if (PyType_Check(type)) {
-    #endif
 #if CYTHON_COMPILING_IN_PYPY
         if (!value) {
             Py_INCREF(Py_None);
@@ -12059,17 +11771,6 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
             goto raise_error;
         }
         value = type;
-        #if PY_VERSION_HEX < 0x02050000
-        if (PyInstance_Check(type)) {
-            type = (PyObject*) ((PyInstanceObject*)type)->in_class;
-            Py_INCREF(type);
-        } else {
-            type = 0;
-            PyErr_SetString(PyExc_TypeError,
-                "raise: exception must be an old-style class or instance");
-            goto raise_error;
-        }
-        #else
         type = (PyObject*) Py_TYPE(type);
         Py_INCREF(type);
         if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
@@ -12077,7 +11778,6 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
                 "raise: exception class must be a subclass of BaseException");
             goto raise_error;
         }
-        #endif
     }
     __Pyx_ErrRestore(type, value, tb);
     return;
@@ -12087,7 +11787,7 @@ raise_error:
     Py_XDECREF(tb);
     return;
 }
-#else /* Python 3+ */
+#else
 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
     PyObject* owned_instance = NULL;
     if (tb == Py_None) {
@@ -12112,10 +11812,13 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject
         if (value && PyExceptionInstance_Check(value)) {
             instance_class = (PyObject*) Py_TYPE(value);
             if (instance_class != type) {
-                if (PyObject_IsSubclass(instance_class, type)) {
-                    type = instance_class;
-                } else {
+                int is_subclass = PyObject_IsSubclass(instance_class, type);
+                if (!is_subclass) {
                     instance_class = NULL;
+                } else if (unlikely(is_subclass == -1)) {
+                    goto bad;
+                } else {
+                    type = instance_class;
                 }
             }
         }
@@ -12173,6 +11876,13 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject
     }
     PyErr_SetObject(type, value);
     if (tb) {
+#if CYTHON_COMPILING_IN_PYPY
+        PyObject *tmp_type, *tmp_value, *tmp_tb;
+        PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
+        Py_INCREF(tb);
+        PyErr_Restore(tmp_type, tmp_value, tb);
+        Py_XDECREF(tmp_tb);
+#else
         PyThreadState *tstate = PyThreadState_GET();
         PyObject* tmp_tb = tstate->curexc_traceback;
         if (tb != tmp_tb) {
@@ -12180,6 +11890,7 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject
             tstate->curexc_traceback = tb;
             Py_XDECREF(tmp_tb);
         }
+#endif
     }
 bad:
     Py_XDECREF(owned_instance);
@@ -12247,7 +11958,7 @@ static int __Pyx_BufFmt_ParseNumber(const char** ts) {
 }
 static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
     int number = __Pyx_BufFmt_ParseNumber(ts);
-    if (number == -1) /* First char was not a digit */
+    if (number == -1)
         PyErr_Format(PyExc_ValueError,\
                      "Does not understand character buffer dtype format string ('%c')", **ts);
     return number;
@@ -12492,7 +12203,7 @@ static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
     ctx->fmt_offset += size;
     if (arraysize)
       ctx->fmt_offset += (arraysize - 1) * size;
-    --ctx->enc_count; /* Consume from buffer string */
+    --ctx->enc_count;
     while (1) {
       if (field == &ctx->root) {
         ctx->head = NULL;
@@ -12500,7 +12211,7 @@ static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
           __Pyx_BufFmt_RaiseExpected(ctx);
           return -1;
         }
-        break; /* breaks both loops as ctx->enc_count == 0 */
+        break;
       }
       ctx->head->field = ++field;
       if (field->type == NULL) {
@@ -12509,7 +12220,7 @@ static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
         continue;
       } else if (field->type->typegroup == 'S') {
         size_t parent_offset = ctx->head->parent_offset + field->offset;
-        if (field->type->fields->type == NULL) continue; /* empty struct */
+        if (field->type->fields->type == NULL) continue;
         field = field->type->fields;
         ++ctx->head;
         ctx->head->field = field;
@@ -12541,7 +12252,7 @@ __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
     while (*ts && *ts != ')') {
         switch (*ts) {
             case ' ': case '\f': case '\r': case '\n': case '\t': case '\v':  continue;
-            default:  break;  /* not a 'break' in the loop */
+            default:  break;
         }
         number = __Pyx_BufFmt_ExpectNumber(&ts);
         if (number == -1) return NULL;
@@ -12610,7 +12321,7 @@ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const cha
       case '^':
         ctx->new_packmode = *ts++;
         break;
-      case 'T': /* substruct */
+      case 'T':
         {
           const char* ts_after_sub;
           size_t i, struct_count = ctx->new_count;
@@ -12622,7 +12333,7 @@ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const cha
             return NULL;
           }
           if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
-          ctx->enc_type = 0; /* Erase processed last struct element */
+          ctx->enc_type = 0;
           ctx->enc_count = 0;
           ctx->struct_alignment = 0;
           ++ts;
@@ -12635,12 +12346,12 @@ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const cha
           if (struct_alignment) ctx->struct_alignment = struct_alignment;
         }
         break;
-      case '}': /* end of substruct; either repeat or move on */
+      case '}':
         {
           size_t alignment = ctx->struct_alignment;
           ++ts;
           if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
-          ctx->enc_type = 0; /* Erase processed last struct element */
+          ctx->enc_type = 0;
           if (alignment && ctx->fmt_offset % alignment) {
             ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
           }
@@ -12765,39 +12476,12 @@ static void __Pyx_RaiseBufferIndexError(int axis) {
      "Out of bounds on buffer access (axis %d)", axis);
 }
 
-static PyObject* __Pyx_PyDict_GetItemDefault(PyObject* d, PyObject* key, PyObject* default_value) {
-    PyObject* value;
-#if PY_MAJOR_VERSION >= 3
-    value = PyDict_GetItemWithError(d, key);
-    if (unlikely(!value)) {
-        if (unlikely(PyErr_Occurred()))
-            return NULL;
-        value = default_value;
-    }
-    Py_INCREF(value);
-#else
-    if (PyString_CheckExact(key) || PyUnicode_CheckExact(key) || PyInt_CheckExact(key)) {
-        value = PyDict_GetItem(d, key);
-        if (unlikely(!value)) {
-            value = default_value;
-        }
-        Py_INCREF(value);
-    } else {
-        if (default_value == Py_None)
-            default_value = NULL;
-        value = PyObject_CallMethodObjArgs(
-            d, __pyx_n_s_get, key, default_value, NULL);
-    }
-#endif
-    return value;
-}
-
 static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
     PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
 }
 
 static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
-#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0)
+#if PY_VERSION_HEX >= 0x02070000
     PyObject *ob = PyCapsule_New(vtable, 0, 0);
 #else
     PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
@@ -12834,72 +12518,187 @@ static PyObject *__Pyx_GetNameInClass(PyObject *nmspace, PyObject *name) {
     return result;
 }
 
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
+    int start = 0, mid = 0, end = count - 1;
+    if (end >= 0 && code_line > entries[end].code_line) {
+        return count;
+    }
+    while (start < end) {
+        mid = (start + end) / 2;
+        if (code_line < entries[mid].code_line) {
+            end = mid;
+        } else if (code_line > entries[mid].code_line) {
+             start = mid + 1;
+        } else {
+            return mid;
+        }
+    }
+    if (code_line <= entries[mid].code_line) {
+        return mid;
+    } else {
+        return mid + 1;
+    }
+}
+static PyCodeObject *__pyx_find_code_object(int code_line) {
+    PyCodeObject* code_object;
+    int pos;
+    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
+        return NULL;
+    }
+    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+    if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
+        return NULL;
+    }
+    code_object = __pyx_code_cache.entries[pos].code_object;
+    Py_INCREF(code_object);
+    return code_object;
+}
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
+    int pos, i;
+    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
+    if (unlikely(!code_line)) {
+        return;
+    }
+    if (unlikely(!entries)) {
+        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
+        if (likely(entries)) {
+            __pyx_code_cache.entries = entries;
+            __pyx_code_cache.max_count = 64;
+            __pyx_code_cache.count = 1;
+            entries[0].code_line = code_line;
+            entries[0].code_object = code_object;
+            Py_INCREF(code_object);
+        }
+        return;
+    }
+    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
+        PyCodeObject* tmp = entries[pos].code_object;
+        entries[pos].code_object = code_object;
+        Py_DECREF(tmp);
+        return;
+    }
+    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
+        int new_max = __pyx_code_cache.max_count + 64;
+        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
+            __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
+        if (unlikely(!entries)) {
+            return;
+        }
+        __pyx_code_cache.entries = entries;
+        __pyx_code_cache.max_count = new_max;
+    }
+    for (i=__pyx_code_cache.count; i>pos; i--) {
+        entries[i] = entries[i-1];
+    }
+    entries[pos].code_line = code_line;
+    entries[pos].code_object = code_object;
+    __pyx_code_cache.count++;
+    Py_INCREF(code_object);
+}
+
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
+            const char *funcname, int c_line,
+            int py_line, const char *filename) {
+    PyCodeObject *py_code = 0;
+    PyObject *py_srcfile = 0;
+    PyObject *py_funcname = 0;
+    #if PY_MAJOR_VERSION < 3
+    py_srcfile = PyString_FromString(filename);
+    #else
+    py_srcfile = PyUnicode_FromString(filename);
+    #endif
+    if (!py_srcfile) goto bad;
+    if (c_line) {
+        #if PY_MAJOR_VERSION < 3
+        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+        #else
+        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+        #endif
+    }
+    else {
+        #if PY_MAJOR_VERSION < 3
+        py_funcname = PyString_FromString(funcname);
+        #else
+        py_funcname = PyUnicode_FromString(funcname);
+        #endif
+    }
+    if (!py_funcname) goto bad;
+    py_code = __Pyx_PyCode_New(
+        0,
+        0,
+        0,
+        0,
+        0,
+        __pyx_empty_bytes, /*PyObject *code,*/
+        __pyx_empty_tuple, /*PyObject *consts,*/
+        __pyx_empty_tuple, /*PyObject *names,*/
+        __pyx_empty_tuple, /*PyObject *varnames,*/
+        __pyx_empty_tuple, /*PyObject *freevars,*/
+        __pyx_empty_tuple, /*PyObject *cellvars,*/
+        py_srcfile,   /*PyObject *filename,*/
+        py_funcname,  /*PyObject *name,*/
+        py_line,
+        __pyx_empty_bytes  /*PyObject *lnotab*/
+    );
+    Py_DECREF(py_srcfile);
+    Py_DECREF(py_funcname);
+    return py_code;
+bad:
+    Py_XDECREF(py_srcfile);
+    Py_XDECREF(py_funcname);
+    return NULL;
+}
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+                               int py_line, const char *filename) {
+    PyCodeObject *py_code = 0;
+    PyFrameObject *py_frame = 0;
+    py_code = __pyx_find_code_object(c_line ? c_line : py_line);
+    if (!py_code) {
+        py_code = __Pyx_CreateCodeObjectForTraceback(
+            funcname, c_line, py_line, filename);
+        if (!py_code) goto bad;
+        __pyx_insert_code_object(c_line ? c_line : py_line, py_code);
+    }
+    py_frame = PyFrame_New(
+        PyThreadState_GET(), /*PyThreadState *tstate,*/
+        py_code,             /*PyCodeObject *code,*/
+        __pyx_d,      /*PyObject *globals,*/
+        0                    /*PyObject *locals*/
+    );
+    if (!py_frame) goto bad;
+    py_frame->f_lineno = py_line;
+    PyTraceBack_Here(py_frame);
+bad:
+    Py_XDECREF(py_code);
+    Py_XDECREF(py_frame);
+}
+
 #if PY_MAJOR_VERSION < 3
 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
-  #if PY_VERSION_HEX >= 0x02060000
     if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
-  #endif
         if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags);
-  #if PY_VERSION_HEX < 0x02060000
-    if (obj->ob_type->tp_dict) {
-        PyObject *getbuffer_cobj = PyObject_GetItem(
-            obj->ob_type->tp_dict, __pyx_n_s_pyx_getbuffer);
-        if (getbuffer_cobj) {
-            getbufferproc func = (getbufferproc) PyCObject_AsVoidPtr(getbuffer_cobj);
-            Py_DECREF(getbuffer_cobj);
-            if (!func)
-                goto fail;
-            return func(obj, view, flags);
-        } else {
-            PyErr_Clear();
-        }
-    }
-  #endif
     PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
-#if PY_VERSION_HEX < 0x02060000
-fail:
-#endif
     return -1;
 }
 static void __Pyx_ReleaseBuffer(Py_buffer *view) {
     PyObject *obj = view->obj;
     if (!obj) return;
-  #if PY_VERSION_HEX >= 0x02060000
     if (PyObject_CheckBuffer(obj)) {
         PyBuffer_Release(view);
         return;
     }
-  #endif
         if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; }
-  #if PY_VERSION_HEX < 0x02060000
-    if (obj->ob_type->tp_dict) {
-        PyObject *releasebuffer_cobj = PyObject_GetItem(
-            obj->ob_type->tp_dict, __pyx_n_s_pyx_releasebuffer);
-        if (releasebuffer_cobj) {
-            releasebufferproc func = (releasebufferproc) PyCObject_AsVoidPtr(releasebuffer_cobj);
-            Py_DECREF(releasebuffer_cobj);
-            if (!func)
-                goto fail;
-            func(obj, view);
-            return;
-        } else {
-            PyErr_Clear();
-        }
-    }
-  #endif
-    goto nofail;
-#if PY_VERSION_HEX < 0x02060000
-fail:
-#endif
-    PyErr_WriteUnraisable(obj);
-nofail:
     Py_DECREF(obj);
     view->obj = NULL;
 }
-#endif /*  PY_MAJOR_VERSION < 3 */
+#endif
 
 
-        static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
+          static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
     PyObject *empty_list = 0;
     PyObject *module = 0;
     PyObject *global_dict = 0;
@@ -12925,7 +12724,6 @@ nofail:
     empty_dict = PyDict_New();
     if (!empty_dict)
         goto bad;
-    #if PY_VERSION_HEX >= 0x02050000
     {
         #if PY_MAJOR_VERSION >= 3
         if (level == -1) {
@@ -12947,7 +12745,7 @@ nofail:
                     PyErr_Clear();
                 }
             }
-            level = 0; /* try absolute import on failure */
+            level = 0;
         }
         #endif
         if (!module) {
@@ -12964,14 +12762,6 @@ nofail:
             #endif
         }
     }
-    #else
-    if (level>0) {
-        PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4.");
-        goto bad;
-    }
-    module = PyObject_CallFunctionObjArgs(py_import,
-        name, global_dict, empty_dict, list, NULL);
-    #endif
 bad:
     #if PY_VERSION_HEX < 0x03030000
     Py_XDECREF(py_import);
@@ -12981,17 +12771,16 @@ bad:
     return module;
 }
 
-#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func)             \
+#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)       \
     {                                                                     \
-        func_type value = func(x);                                        \
+        func_type value = func_value;                                     \
         if (sizeof(target_type) < sizeof(func_type)) {                    \
             if (unlikely(value != (func_type) (target_type) value)) {     \
                 func_type zero = 0;                                       \
-                PyErr_SetString(PyExc_OverflowError,                      \
-                    (is_unsigned && unlikely(value < zero)) ?             \
-                    "can't convert negative value to " #target_type :     \
-                    "value too large to convert to " #target_type);       \
-                return (target_type) -1;                                  \
+                if (is_unsigned && unlikely(value < zero))                \
+                    goto raise_neg_overflow;                              \
+                else                                                      \
+                    goto raise_overflow;                                  \
             }                                                             \
         }                                                                 \
         return (target_type) value;                                       \
@@ -13002,19 +12791,18 @@ bad:
   #include "longintrepr.h"
  #endif
 #endif
+
 static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
     const int neg_one = (int) -1, const_zero = 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
         if (sizeof(int) < sizeof(long)) {
-            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG)
+            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
         } else {
             long val = PyInt_AS_LONG(x);
             if (is_unsigned && unlikely(val < 0)) {
-                PyErr_SetString(PyExc_OverflowError,
-                                "can't convert negative value to int");
-                return (int) -1;
+                goto raise_neg_overflow;
             }
             return (int) val;
         }
@@ -13024,40 +12812,44 @@ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
         if (is_unsigned) {
 #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
  #if CYTHON_USE_PYLONG_INTERNALS
-            if (sizeof(digit) <= sizeof(int)) {
-                switch (Py_SIZE(x)) {
-                    case  0: return 0;
-                    case  1: return (int) ((PyLongObject*)x)->ob_digit[0];
-                }
+            switch (Py_SIZE(x)) {
+                case  0: return 0;
+                case  1: __PYX_VERIFY_RETURN_INT(int, digit, ((PyLongObject*)x)->ob_digit[0]);
             }
  #endif
 #endif
+#if CYTHON_COMPILING_IN_CPYTHON
             if (unlikely(Py_SIZE(x) < 0)) {
-                PyErr_SetString(PyExc_OverflowError,
-                                "can't convert negative value to int");
-                return (int) -1;
+                goto raise_neg_overflow;
             }
+#else
+            {
+                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+                if (unlikely(result < 0))
+                    return (int) -1;
+                if (unlikely(result == 1))
+                    goto raise_neg_overflow;
+            }
+#endif
             if (sizeof(int) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong)
-            } else if (sizeof(int) <= sizeof(unsigned long long)) {
-                __PYX_VERIFY_RETURN_INT(int, unsigned long long, PyLong_AsUnsignedLongLong)
+                __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong(x))
+            } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
             }
         } else {
 #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
  #if CYTHON_USE_PYLONG_INTERNALS
-            if (sizeof(digit) <= sizeof(int)) {
-                switch (Py_SIZE(x)) {
-                    case  0: return 0;
-                    case  1: return +(int) ((PyLongObject*)x)->ob_digit[0];
-                    case -1: return -(int) ((PyLongObject*)x)->ob_digit[0];
-                }
+            switch (Py_SIZE(x)) {
+                case  0: return 0;
+                case  1: __PYX_VERIFY_RETURN_INT(int,  digit, +(((PyLongObject*)x)->ob_digit[0]));
+                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
             }
  #endif
 #endif
             if (sizeof(int) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong)
-            } else if (sizeof(int) <= sizeof(long long)) {
-                __PYX_VERIFY_RETURN_INT(int, long long, PyLong_AsLongLong)
+                __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong(x))
+            } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT(int, PY_LONG_LONG, PyLong_AsLongLong(x))
             }
         }
         {
@@ -13095,6 +12887,14 @@ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
         Py_DECREF(tmp);
         return val;
     }
+raise_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "value too large to convert to int");
+    return (int) -1;
+raise_neg_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "can't convert negative value to int");
+    return (int) -1;
 }
 
 static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_uint16(npy_uint16 value) {
@@ -13105,14 +12905,14 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_uint16(npy_uint16 value) {
             return PyInt_FromLong((long) value);
         } else if (sizeof(npy_uint16) <= sizeof(unsigned long)) {
             return PyLong_FromUnsignedLong((unsigned long) value);
-        } else if (sizeof(npy_uint16) <= sizeof(unsigned long long)) {
-            return PyLong_FromUnsignedLongLong((unsigned long long) value);
+        } else if (sizeof(npy_uint16) <= sizeof(unsigned PY_LONG_LONG)) {
+            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
         }
     } else {
         if (sizeof(npy_uint16) <= sizeof(long)) {
             return PyInt_FromLong((long) value);
-        } else if (sizeof(npy_uint16) <= sizeof(long long)) {
-            return PyLong_FromLongLong((long long) value);
+        } else if (sizeof(npy_uint16) <= sizeof(PY_LONG_LONG)) {
+            return PyLong_FromLongLong((PY_LONG_LONG) value);
         }
     }
     {
@@ -13131,14 +12931,14 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
             return PyInt_FromLong((long) value);
         } else if (sizeof(int) <= sizeof(unsigned long)) {
             return PyLong_FromUnsignedLong((unsigned long) value);
-        } else if (sizeof(int) <= sizeof(unsigned long long)) {
-            return PyLong_FromUnsignedLongLong((unsigned long long) value);
+        } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
+            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
         }
     } else {
         if (sizeof(int) <= sizeof(long)) {
             return PyInt_FromLong((long) value);
-        } else if (sizeof(int) <= sizeof(long long)) {
-            return PyLong_FromLongLong((long long) value);
+        } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
+            return PyLong_FromLongLong((PY_LONG_LONG) value);
         }
     }
     {
@@ -13157,14 +12957,14 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_int32(npy_int32 value) {
             return PyInt_FromLong((long) value);
         } else if (sizeof(npy_int32) <= sizeof(unsigned long)) {
             return PyLong_FromUnsignedLong((unsigned long) value);
-        } else if (sizeof(npy_int32) <= sizeof(unsigned long long)) {
-            return PyLong_FromUnsignedLongLong((unsigned long long) value);
+        } else if (sizeof(npy_int32) <= sizeof(unsigned PY_LONG_LONG)) {
+            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
         }
     } else {
         if (sizeof(npy_int32) <= sizeof(long)) {
             return PyInt_FromLong((long) value);
-        } else if (sizeof(npy_int32) <= sizeof(long long)) {
-            return PyLong_FromLongLong((long long) value);
+        } else if (sizeof(npy_int32) <= sizeof(PY_LONG_LONG)) {
+            return PyLong_FromLongLong((PY_LONG_LONG) value);
         }
     }
     {
@@ -13175,24 +12975,17 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_int32(npy_int32 value) {
     }
 }
 
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
-  #include "longintrepr.h"
- #endif
-#endif
 static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *x) {
     const npy_int32 neg_one = (npy_int32) -1, const_zero = 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
         if (sizeof(npy_int32) < sizeof(long)) {
-            __PYX_VERIFY_RETURN_INT(npy_int32, long, PyInt_AS_LONG)
+            __PYX_VERIFY_RETURN_INT(npy_int32, long, PyInt_AS_LONG(x))
         } else {
             long val = PyInt_AS_LONG(x);
             if (is_unsigned && unlikely(val < 0)) {
-                PyErr_SetString(PyExc_OverflowError,
-                                "can't convert negative value to npy_int32");
-                return (npy_int32) -1;
+                goto raise_neg_overflow;
             }
             return (npy_int32) val;
         }
@@ -13202,40 +12995,44 @@ static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *x) {
         if (is_unsigned) {
 #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
  #if CYTHON_USE_PYLONG_INTERNALS
-            if (sizeof(digit) <= sizeof(npy_int32)) {
-                switch (Py_SIZE(x)) {
-                    case  0: return 0;
-                    case  1: return (npy_int32) ((PyLongObject*)x)->ob_digit[0];
-                }
+            switch (Py_SIZE(x)) {
+                case  0: return 0;
+                case  1: __PYX_VERIFY_RETURN_INT(npy_int32, digit, ((PyLongObject*)x)->ob_digit[0]);
             }
  #endif
 #endif
+#if CYTHON_COMPILING_IN_CPYTHON
             if (unlikely(Py_SIZE(x) < 0)) {
-                PyErr_SetString(PyExc_OverflowError,
-                                "can't convert negative value to npy_int32");
-                return (npy_int32) -1;
+                goto raise_neg_overflow;
+            }
+#else
+            {
+                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+                if (unlikely(result < 0))
+                    return (npy_int32) -1;
+                if (unlikely(result == 1))
+                    goto raise_neg_overflow;
             }
+#endif
             if (sizeof(npy_int32) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, PyLong_AsUnsignedLong)
-            } else if (sizeof(npy_int32) <= sizeof(unsigned long long)) {
-                __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long long, PyLong_AsUnsignedLongLong)
+                __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, PyLong_AsUnsignedLong(x))
+            } else if (sizeof(npy_int32) <= sizeof(unsigned PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT(npy_int32, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
             }
         } else {
 #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
  #if CYTHON_USE_PYLONG_INTERNALS
-            if (sizeof(digit) <= sizeof(npy_int32)) {
-                switch (Py_SIZE(x)) {
-                    case  0: return 0;
-                    case  1: return +(npy_int32) ((PyLongObject*)x)->ob_digit[0];
-                    case -1: return -(npy_int32) ((PyLongObject*)x)->ob_digit[0];
-                }
+            switch (Py_SIZE(x)) {
+                case  0: return 0;
+                case  1: __PYX_VERIFY_RETURN_INT(npy_int32,  digit, +(((PyLongObject*)x)->ob_digit[0]));
+                case -1: __PYX_VERIFY_RETURN_INT(npy_int32, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
             }
  #endif
 #endif
             if (sizeof(npy_int32) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(npy_int32, long, PyLong_AsLong)
-            } else if (sizeof(npy_int32) <= sizeof(long long)) {
-                __PYX_VERIFY_RETURN_INT(npy_int32, long long, PyLong_AsLongLong)
+                __PYX_VERIFY_RETURN_INT(npy_int32, long, PyLong_AsLong(x))
+            } else if (sizeof(npy_int32) <= sizeof(PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT(npy_int32, PY_LONG_LONG, PyLong_AsLongLong(x))
             }
         }
         {
@@ -13273,6 +13070,14 @@ static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *x) {
         Py_DECREF(tmp);
         return val;
     }
+raise_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "value too large to convert to npy_int32");
+    return (npy_int32) -1;
+raise_neg_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "can't convert negative value to npy_int32");
+    return (npy_int32) -1;
 }
 
 static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
@@ -13283,14 +13088,14 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
             return PyInt_FromLong((long) value);
         } else if (sizeof(long) <= sizeof(unsigned long)) {
             return PyLong_FromUnsignedLong((unsigned long) value);
-        } else if (sizeof(long) <= sizeof(unsigned long long)) {
-            return PyLong_FromUnsignedLongLong((unsigned long long) value);
+        } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
+            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
         }
     } else {
         if (sizeof(long) <= sizeof(long)) {
             return PyInt_FromLong((long) value);
-        } else if (sizeof(long) <= sizeof(long long)) {
-            return PyLong_FromLongLong((long long) value);
+        } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
+            return PyLong_FromLongLong((PY_LONG_LONG) value);
         }
     }
     {
@@ -13301,24 +13106,17 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
     }
 }
 
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
-  #include "longintrepr.h"
- #endif
-#endif
 static CYTHON_INLINE npy_uint8 __Pyx_PyInt_As_npy_uint8(PyObject *x) {
     const npy_uint8 neg_one = (npy_uint8) -1, const_zero = 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
         if (sizeof(npy_uint8) < sizeof(long)) {
-            __PYX_VERIFY_RETURN_INT(npy_uint8, long, PyInt_AS_LONG)
+            __PYX_VERIFY_RETURN_INT(npy_uint8, long, PyInt_AS_LONG(x))
         } else {
             long val = PyInt_AS_LONG(x);
             if (is_unsigned && unlikely(val < 0)) {
-                PyErr_SetString(PyExc_OverflowError,
-                                "can't convert negative value to npy_uint8");
-                return (npy_uint8) -1;
+                goto raise_neg_overflow;
             }
             return (npy_uint8) val;
         }
@@ -13328,40 +13126,44 @@ static CYTHON_INLINE npy_uint8 __Pyx_PyInt_As_npy_uint8(PyObject *x) {
         if (is_unsigned) {
 #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
  #if CYTHON_USE_PYLONG_INTERNALS
-            if (sizeof(digit) <= sizeof(npy_uint8)) {
-                switch (Py_SIZE(x)) {
-                    case  0: return 0;
-                    case  1: return (npy_uint8) ((PyLongObject*)x)->ob_digit[0];
-                }
+            switch (Py_SIZE(x)) {
+                case  0: return 0;
+                case  1: __PYX_VERIFY_RETURN_INT(npy_uint8, digit, ((PyLongObject*)x)->ob_digit[0]);
             }
  #endif
 #endif
+#if CYTHON_COMPILING_IN_CPYTHON
             if (unlikely(Py_SIZE(x) < 0)) {
-                PyErr_SetString(PyExc_OverflowError,
-                                "can't convert negative value to npy_uint8");
-                return (npy_uint8) -1;
+                goto raise_neg_overflow;
+            }
+#else
+            {
+                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+                if (unlikely(result < 0))
+                    return (npy_uint8) -1;
+                if (unlikely(result == 1))
+                    goto raise_neg_overflow;
             }
+#endif
             if (sizeof(npy_uint8) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(npy_uint8, unsigned long, PyLong_AsUnsignedLong)
-            } else if (sizeof(npy_uint8) <= sizeof(unsigned long long)) {
-                __PYX_VERIFY_RETURN_INT(npy_uint8, unsigned long long, PyLong_AsUnsignedLongLong)
+                __PYX_VERIFY_RETURN_INT(npy_uint8, unsigned long, PyLong_AsUnsignedLong(x))
+            } else if (sizeof(npy_uint8) <= sizeof(unsigned PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT(npy_uint8, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
             }
         } else {
 #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
  #if CYTHON_USE_PYLONG_INTERNALS
-            if (sizeof(digit) <= sizeof(npy_uint8)) {
-                switch (Py_SIZE(x)) {
-                    case  0: return 0;
-                    case  1: return +(npy_uint8) ((PyLongObject*)x)->ob_digit[0];
-                    case -1: return -(npy_uint8) ((PyLongObject*)x)->ob_digit[0];
-                }
+            switch (Py_SIZE(x)) {
+                case  0: return 0;
+                case  1: __PYX_VERIFY_RETURN_INT(npy_uint8,  digit, +(((PyLongObject*)x)->ob_digit[0]));
+                case -1: __PYX_VERIFY_RETURN_INT(npy_uint8, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
             }
  #endif
 #endif
             if (sizeof(npy_uint8) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(npy_uint8, long, PyLong_AsLong)
-            } else if (sizeof(npy_uint8) <= sizeof(long long)) {
-                __PYX_VERIFY_RETURN_INT(npy_uint8, long long, PyLong_AsLongLong)
+                __PYX_VERIFY_RETURN_INT(npy_uint8, long, PyLong_AsLong(x))
+            } else if (sizeof(npy_uint8) <= sizeof(PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT(npy_uint8, PY_LONG_LONG, PyLong_AsLongLong(x))
             }
         }
         {
@@ -13399,26 +13201,27 @@ static CYTHON_INLINE npy_uint8 __Pyx_PyInt_As_npy_uint8(PyObject *x) {
         Py_DECREF(tmp);
         return val;
     }
+raise_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "value too large to convert to npy_uint8");
+    return (npy_uint8) -1;
+raise_neg_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "can't convert negative value to npy_uint8");
+    return (npy_uint8) -1;
 }
 
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
-  #include "longintrepr.h"
- #endif
-#endif
 static CYTHON_INLINE npy_uint16 __Pyx_PyInt_As_npy_uint16(PyObject *x) {
     const npy_uint16 neg_one = (npy_uint16) -1, const_zero = 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
         if (sizeof(npy_uint16) < sizeof(long)) {
-            __PYX_VERIFY_RETURN_INT(npy_uint16, long, PyInt_AS_LONG)
+            __PYX_VERIFY_RETURN_INT(npy_uint16, long, PyInt_AS_LONG(x))
         } else {
             long val = PyInt_AS_LONG(x);
             if (is_unsigned && unlikely(val < 0)) {
-                PyErr_SetString(PyExc_OverflowError,
-                                "can't convert negative value to npy_uint16");
-                return (npy_uint16) -1;
+                goto raise_neg_overflow;
             }
             return (npy_uint16) val;
         }
@@ -13428,40 +13231,44 @@ static CYTHON_INLINE npy_uint16 __Pyx_PyInt_As_npy_uint16(PyObject *x) {
         if (is_unsigned) {
 #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
  #if CYTHON_USE_PYLONG_INTERNALS
-            if (sizeof(digit) <= sizeof(npy_uint16)) {
-                switch (Py_SIZE(x)) {
-                    case  0: return 0;
-                    case  1: return (npy_uint16) ((PyLongObject*)x)->ob_digit[0];
-                }
+            switch (Py_SIZE(x)) {
+                case  0: return 0;
+                case  1: __PYX_VERIFY_RETURN_INT(npy_uint16, digit, ((PyLongObject*)x)->ob_digit[0]);
             }
  #endif
 #endif
+#if CYTHON_COMPILING_IN_CPYTHON
             if (unlikely(Py_SIZE(x) < 0)) {
-                PyErr_SetString(PyExc_OverflowError,
-                                "can't convert negative value to npy_uint16");
-                return (npy_uint16) -1;
+                goto raise_neg_overflow;
+            }
+#else
+            {
+                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+                if (unlikely(result < 0))
+                    return (npy_uint16) -1;
+                if (unlikely(result == 1))
+                    goto raise_neg_overflow;
             }
+#endif
             if (sizeof(npy_uint16) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(npy_uint16, unsigned long, PyLong_AsUnsignedLong)
-            } else if (sizeof(npy_uint16) <= sizeof(unsigned long long)) {
-                __PYX_VERIFY_RETURN_INT(npy_uint16, unsigned long long, PyLong_AsUnsignedLongLong)
+                __PYX_VERIFY_RETURN_INT(npy_uint16, unsigned long, PyLong_AsUnsignedLong(x))
+            } else if (sizeof(npy_uint16) <= sizeof(unsigned PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT(npy_uint16, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
             }
         } else {
 #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
  #if CYTHON_USE_PYLONG_INTERNALS
-            if (sizeof(digit) <= sizeof(npy_uint16)) {
-                switch (Py_SIZE(x)) {
-                    case  0: return 0;
-                    case  1: return +(npy_uint16) ((PyLongObject*)x)->ob_digit[0];
-                    case -1: return -(npy_uint16) ((PyLongObject*)x)->ob_digit[0];
-                }
+            switch (Py_SIZE(x)) {
+                case  0: return 0;
+                case  1: __PYX_VERIFY_RETURN_INT(npy_uint16,  digit, +(((PyLongObject*)x)->ob_digit[0]));
+                case -1: __PYX_VERIFY_RETURN_INT(npy_uint16, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
             }
  #endif
 #endif
             if (sizeof(npy_uint16) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(npy_uint16, long, PyLong_AsLong)
-            } else if (sizeof(npy_uint16) <= sizeof(long long)) {
-                __PYX_VERIFY_RETURN_INT(npy_uint16, long long, PyLong_AsLongLong)
+                __PYX_VERIFY_RETURN_INT(npy_uint16, long, PyLong_AsLong(x))
+            } else if (sizeof(npy_uint16) <= sizeof(PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT(npy_uint16, PY_LONG_LONG, PyLong_AsLongLong(x))
             }
         }
         {
@@ -13499,26 +13306,27 @@ static CYTHON_INLINE npy_uint16 __Pyx_PyInt_As_npy_uint16(PyObject *x) {
         Py_DECREF(tmp);
         return val;
     }
+raise_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "value too large to convert to npy_uint16");
+    return (npy_uint16) -1;
+raise_neg_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "can't convert negative value to npy_uint16");
+    return (npy_uint16) -1;
 }
 
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
-  #include "longintrepr.h"
- #endif
-#endif
 static CYTHON_INLINE npy_int8 __Pyx_PyInt_As_npy_int8(PyObject *x) {
     const npy_int8 neg_one = (npy_int8) -1, const_zero = 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
         if (sizeof(npy_int8) < sizeof(long)) {
-            __PYX_VERIFY_RETURN_INT(npy_int8, long, PyInt_AS_LONG)
+            __PYX_VERIFY_RETURN_INT(npy_int8, long, PyInt_AS_LONG(x))
         } else {
             long val = PyInt_AS_LONG(x);
             if (is_unsigned && unlikely(val < 0)) {
-                PyErr_SetString(PyExc_OverflowError,
-                                "can't convert negative value to npy_int8");
-                return (npy_int8) -1;
+                goto raise_neg_overflow;
             }
             return (npy_int8) val;
         }
@@ -13528,40 +13336,44 @@ static CYTHON_INLINE npy_int8 __Pyx_PyInt_As_npy_int8(PyObject *x) {
         if (is_unsigned) {
 #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
  #if CYTHON_USE_PYLONG_INTERNALS
-            if (sizeof(digit) <= sizeof(npy_int8)) {
-                switch (Py_SIZE(x)) {
-                    case  0: return 0;
-                    case  1: return (npy_int8) ((PyLongObject*)x)->ob_digit[0];
-                }
+            switch (Py_SIZE(x)) {
+                case  0: return 0;
+                case  1: __PYX_VERIFY_RETURN_INT(npy_int8, digit, ((PyLongObject*)x)->ob_digit[0]);
             }
  #endif
 #endif
+#if CYTHON_COMPILING_IN_CPYTHON
             if (unlikely(Py_SIZE(x) < 0)) {
-                PyErr_SetString(PyExc_OverflowError,
-                                "can't convert negative value to npy_int8");
-                return (npy_int8) -1;
+                goto raise_neg_overflow;
             }
+#else
+            {
+                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+                if (unlikely(result < 0))
+                    return (npy_int8) -1;
+                if (unlikely(result == 1))
+                    goto raise_neg_overflow;
+            }
+#endif
             if (sizeof(npy_int8) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(npy_int8, unsigned long, PyLong_AsUnsignedLong)
-            } else if (sizeof(npy_int8) <= sizeof(unsigned long long)) {
-                __PYX_VERIFY_RETURN_INT(npy_int8, unsigned long long, PyLong_AsUnsignedLongLong)
+                __PYX_VERIFY_RETURN_INT(npy_int8, unsigned long, PyLong_AsUnsignedLong(x))
+            } else if (sizeof(npy_int8) <= sizeof(unsigned PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT(npy_int8, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
             }
         } else {
 #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
  #if CYTHON_USE_PYLONG_INTERNALS
-            if (sizeof(digit) <= sizeof(npy_int8)) {
-                switch (Py_SIZE(x)) {
-                    case  0: return 0;
-                    case  1: return +(npy_int8) ((PyLongObject*)x)->ob_digit[0];
-                    case -1: return -(npy_int8) ((PyLongObject*)x)->ob_digit[0];
-                }
+            switch (Py_SIZE(x)) {
+                case  0: return 0;
+                case  1: __PYX_VERIFY_RETURN_INT(npy_int8,  digit, +(((PyLongObject*)x)->ob_digit[0]));
+                case -1: __PYX_VERIFY_RETURN_INT(npy_int8, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
             }
  #endif
 #endif
             if (sizeof(npy_int8) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(npy_int8, long, PyLong_AsLong)
-            } else if (sizeof(npy_int8) <= sizeof(long long)) {
-                __PYX_VERIFY_RETURN_INT(npy_int8, long long, PyLong_AsLongLong)
+                __PYX_VERIFY_RETURN_INT(npy_int8, long, PyLong_AsLong(x))
+            } else if (sizeof(npy_int8) <= sizeof(PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT(npy_int8, PY_LONG_LONG, PyLong_AsLongLong(x))
             }
         }
         {
@@ -13599,6 +13411,14 @@ static CYTHON_INLINE npy_int8 __Pyx_PyInt_As_npy_int8(PyObject *x) {
         Py_DECREF(tmp);
         return val;
     }
+raise_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "value too large to convert to npy_int8");
+    return (npy_int8) -1;
+raise_neg_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "can't convert negative value to npy_int8");
+    return (npy_int8) -1;
 }
 
 #if CYTHON_CCOMPLEX
@@ -13841,24 +13661,17 @@ static CYTHON_INLINE npy_int8 __Pyx_PyInt_As_npy_int8(PyObject *x) {
     #endif
 #endif
 
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
-  #include "longintrepr.h"
- #endif
-#endif
 static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
     const long neg_one = (long) -1, const_zero = 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
         if (sizeof(long) < sizeof(long)) {
-            __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG)
+            __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
         } else {
             long val = PyInt_AS_LONG(x);
             if (is_unsigned && unlikely(val < 0)) {
-                PyErr_SetString(PyExc_OverflowError,
-                                "can't convert negative value to long");
-                return (long) -1;
+                goto raise_neg_overflow;
             }
             return (long) val;
         }
@@ -13868,40 +13681,44 @@ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
         if (is_unsigned) {
 #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
  #if CYTHON_USE_PYLONG_INTERNALS
-            if (sizeof(digit) <= sizeof(long)) {
-                switch (Py_SIZE(x)) {
-                    case  0: return 0;
-                    case  1: return (long) ((PyLongObject*)x)->ob_digit[0];
-                }
+            switch (Py_SIZE(x)) {
+                case  0: return 0;
+                case  1: __PYX_VERIFY_RETURN_INT(long, digit, ((PyLongObject*)x)->ob_digit[0]);
             }
  #endif
 #endif
+#if CYTHON_COMPILING_IN_CPYTHON
             if (unlikely(Py_SIZE(x) < 0)) {
-                PyErr_SetString(PyExc_OverflowError,
-                                "can't convert negative value to long");
-                return (long) -1;
+                goto raise_neg_overflow;
+            }
+#else
+            {
+                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+                if (unlikely(result < 0))
+                    return (long) -1;
+                if (unlikely(result == 1))
+                    goto raise_neg_overflow;
             }
+#endif
             if (sizeof(long) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong)
-            } else if (sizeof(long) <= sizeof(unsigned long long)) {
-                __PYX_VERIFY_RETURN_INT(long, unsigned long long, PyLong_AsUnsignedLongLong)
+                __PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong(x))
+            } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
             }
         } else {
 #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
  #if CYTHON_USE_PYLONG_INTERNALS
-            if (sizeof(digit) <= sizeof(long)) {
-                switch (Py_SIZE(x)) {
-                    case  0: return 0;
-                    case  1: return +(long) ((PyLongObject*)x)->ob_digit[0];
-                    case -1: return -(long) ((PyLongObject*)x)->ob_digit[0];
-                }
+            switch (Py_SIZE(x)) {
+                case  0: return 0;
+                case  1: __PYX_VERIFY_RETURN_INT(long,  digit, +(((PyLongObject*)x)->ob_digit[0]));
+                case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
             }
  #endif
 #endif
             if (sizeof(long) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong)
-            } else if (sizeof(long) <= sizeof(long long)) {
-                __PYX_VERIFY_RETURN_INT(long, long long, PyLong_AsLongLong)
+                __PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong(x))
+            } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
+                __PYX_VERIFY_RETURN_INT(long, PY_LONG_LONG, PyLong_AsLongLong(x))
             }
         }
         {
@@ -13939,6 +13756,14 @@ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
         Py_DECREF(tmp);
         return val;
     }
+raise_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "value too large to convert to long");
+    return (long) -1;
+raise_neg_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "can't convert negative value to long");
+    return (long) -1;
 }
 
 static int __Pyx_check_binary_version(void) {
@@ -13951,11 +13776,7 @@ static int __Pyx_check_binary_version(void) {
                       "compiletime version %s of module '%.100s' "
                       "does not match runtime version %s",
                       ctversion, __Pyx_MODULE_NAME, rtversion);
-        #if PY_VERSION_HEX < 0x02050000
-        return PyErr_Warn(NULL, message);
-        #else
         return PyErr_WarnEx(NULL, message, 1);
-        #endif
     }
     return 0;
 }
@@ -14025,11 +13846,7 @@ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class
         PyOS_snprintf(warning, sizeof(warning),
             "%s.%s size changed, may indicate binary incompatibility",
             module_name, class_name);
-        #if PY_VERSION_HEX < 0x02050000
-        if (PyErr_Warn(NULL, warning) < 0) goto bad;
-        #else
         if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
-        #endif
     }
     else if ((size_t)basicsize != size) {
         PyErr_Format(PyExc_ValueError,
@@ -14045,168 +13862,6 @@ bad:
 }
 #endif
 
-static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
-    int start = 0, mid = 0, end = count - 1;
-    if (end >= 0 && code_line > entries[end].code_line) {
-        return count;
-    }
-    while (start < end) {
-        mid = (start + end) / 2;
-        if (code_line < entries[mid].code_line) {
-            end = mid;
-        } else if (code_line > entries[mid].code_line) {
-             start = mid + 1;
-        } else {
-            return mid;
-        }
-    }
-    if (code_line <= entries[mid].code_line) {
-        return mid;
-    } else {
-        return mid + 1;
-    }
-}
-static PyCodeObject *__pyx_find_code_object(int code_line) {
-    PyCodeObject* code_object;
-    int pos;
-    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
-        return NULL;
-    }
-    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
-    if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
-        return NULL;
-    }
-    code_object = __pyx_code_cache.entries[pos].code_object;
-    Py_INCREF(code_object);
-    return code_object;
-}
-static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
-    int pos, i;
-    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
-    if (unlikely(!code_line)) {
-        return;
-    }
-    if (unlikely(!entries)) {
-        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
-        if (likely(entries)) {
-            __pyx_code_cache.entries = entries;
-            __pyx_code_cache.max_count = 64;
-            __pyx_code_cache.count = 1;
-            entries[0].code_line = code_line;
-            entries[0].code_object = code_object;
-            Py_INCREF(code_object);
-        }
-        return;
-    }
-    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
-    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
-        PyCodeObject* tmp = entries[pos].code_object;
-        entries[pos].code_object = code_object;
-        Py_DECREF(tmp);
-        return;
-    }
-    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
-        int new_max = __pyx_code_cache.max_count + 64;
-        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
-            __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
-        if (unlikely(!entries)) {
-            return;
-        }
-        __pyx_code_cache.entries = entries;
-        __pyx_code_cache.max_count = new_max;
-    }
-    for (i=__pyx_code_cache.count; i>pos; i--) {
-        entries[i] = entries[i-1];
-    }
-    entries[pos].code_line = code_line;
-    entries[pos].code_object = code_object;
-    __pyx_code_cache.count++;
-    Py_INCREF(code_object);
-}
-
-#include "compile.h"
-#include "frameobject.h"
-#include "traceback.h"
-static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
-            const char *funcname, int c_line,
-            int py_line, const char *filename) {
-    PyCodeObject *py_code = 0;
-    PyObject *py_srcfile = 0;
-    PyObject *py_funcname = 0;
-    #if PY_MAJOR_VERSION < 3
-    py_srcfile = PyString_FromString(filename);
-    #else
-    py_srcfile = PyUnicode_FromString(filename);
-    #endif
-    if (!py_srcfile) goto bad;
-    if (c_line) {
-        #if PY_MAJOR_VERSION < 3
-        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
-        #else
-        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
-        #endif
-    }
-    else {
-        #if PY_MAJOR_VERSION < 3
-        py_funcname = PyString_FromString(funcname);
-        #else
-        py_funcname = PyUnicode_FromString(funcname);
-        #endif
-    }
-    if (!py_funcname) goto bad;
-    py_code = __Pyx_PyCode_New(
-        0,            /*int argcount,*/
-        0,            /*int kwonlyargcount,*/
-        0,            /*int nlocals,*/
-        0,            /*int stacksize,*/
-        0,            /*int flags,*/
-        __pyx_empty_bytes, /*PyObject *code,*/
-        __pyx_empty_tuple, /*PyObject *consts,*/
-        __pyx_empty_tuple, /*PyObject *names,*/
-        __pyx_empty_tuple, /*PyObject *varnames,*/
-        __pyx_empty_tuple, /*PyObject *freevars,*/
-        __pyx_empty_tuple, /*PyObject *cellvars,*/
-        py_srcfile,   /*PyObject *filename,*/
-        py_funcname,  /*PyObject *name,*/
-        py_line,      /*int firstlineno,*/
-        __pyx_empty_bytes  /*PyObject *lnotab*/
-    );
-    Py_DECREF(py_srcfile);
-    Py_DECREF(py_funcname);
-    return py_code;
-bad:
-    Py_XDECREF(py_srcfile);
-    Py_XDECREF(py_funcname);
-    return NULL;
-}
-static void __Pyx_AddTraceback(const char *funcname, int c_line,
-                               int py_line, const char *filename) {
-    PyCodeObject *py_code = 0;
-    PyObject *py_globals = 0;
-    PyFrameObject *py_frame = 0;
-    py_code = __pyx_find_code_object(c_line ? c_line : py_line);
-    if (!py_code) {
-        py_code = __Pyx_CreateCodeObjectForTraceback(
-            funcname, c_line, py_line, filename);
-        if (!py_code) goto bad;
-        __pyx_insert_code_object(c_line ? c_line : py_line, py_code);
-    }
-    py_globals = PyModule_GetDict(__pyx_m);
-    if (!py_globals) goto bad;
-    py_frame = PyFrame_New(
-        PyThreadState_GET(), /*PyThreadState *tstate,*/
-        py_code,             /*PyCodeObject *code,*/
-        py_globals,          /*PyObject *globals,*/
-        0                    /*PyObject *locals*/
-    );
-    if (!py_frame) goto bad;
-    py_frame->f_lineno = py_line;
-    PyTraceBack_Here(py_frame);
-bad:
-    Py_XDECREF(py_code);
-    Py_XDECREF(py_frame);
-}
-
 static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
     while (t->p) {
         #if PY_MAJOR_VERSION < 3
@@ -14217,7 +13872,7 @@ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
         } else {
             *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
         }
-        #else  /* Python 3+ has unicode identifiers */
+        #else
         if (t->is_unicode | t->is_str) {
             if (t->intern) {
                 *t->p = PyUnicode_InternFromString(t->s);
@@ -14267,11 +13922,11 @@ static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_
                 }
             }
         }
-#endif /*__PYX_DEFAULT_STRING_ENCODING_IS_ASCII*/
+#endif
         *length = PyBytes_GET_SIZE(defenc);
         return defenc_c;
-#else /* PY_VERSION_HEX < 0x03030000 */
-        if (PyUnicode_READY(o) == -1) return NULL;
+#else
+        if (__Pyx_PyUnicode_READY(o) == -1) return NULL;
 #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
         if (PyUnicode_IS_ASCII(o)) {
             *length = PyUnicode_GET_LENGTH(o);
@@ -14280,20 +13935,18 @@ static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_
             PyUnicode_AsASCIIString(o);
             return NULL;
         }
-#else /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */
+#else
         return PyUnicode_AsUTF8AndSize(o, length);
-#endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */
-#endif /* PY_VERSION_HEX < 0x03030000 */
+#endif
+#endif
     } else
-#endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII  || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT */
+#endif
 #if !CYTHON_COMPILING_IN_PYPY
-#if PY_VERSION_HEX >= 0x02060000
     if (PyByteArray_Check(o)) {
         *length = PyByteArray_GET_SIZE(o);
         return PyByteArray_AS_STRING(o);
     } else
 #endif
-#endif
     {
         char* result;
         int r = PyBytes_AsStringAndSize(o, &result, length);
@@ -14354,11 +14007,6 @@ static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
   }
   return res;
 }
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
-  #include "longintrepr.h"
- #endif
-#endif
 static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
   Py_ssize_t ival;
   PyObject *x;
@@ -14376,11 +14024,7 @@ static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
        }
      #endif
     #endif
-  #if PY_VERSION_HEX < 0x02060000
-    return PyInt_AsSsize_t(b);
-  #else
     return PyLong_AsSsize_t(b);
-  #endif
   }
   x = PyNumber_Index(b);
   if (!x) return -1;
@@ -14389,17 +14033,7 @@ static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
   return ival;
 }
 static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
-#if PY_VERSION_HEX < 0x02050000
-   if (ival <= LONG_MAX)
-       return PyInt_FromLong((long)ival);
-   else {
-       unsigned char *bytes = (unsigned char *) &ival;
-       int one = 1; int little = (int)*(unsigned char*)&one;
-       return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0);
-   }
-#else
-   return PyInt_FromSize_t(ival);
-#endif
+    return PyInt_FromSize_t(ival);
 }
 
 
diff --git a/skbio/alignment/_ssw_wrapper.pyx b/skbio/alignment/_ssw_wrapper.pyx
index 1c43e97..5a65d2e 100644
--- a/skbio/alignment/_ssw_wrapper.pyx
+++ b/skbio/alignment/_ssw_wrapper.pyx
@@ -10,7 +10,7 @@ from cpython cimport bool
 import numpy as np
 cimport numpy as cnp
 from skbio.alignment import Alignment
-from skbio.sequence import ProteinSequence, NucleotideSequence
+from skbio.sequence import Protein, Sequence
 
 cdef extern from "_lib/ssw.h":
 
@@ -74,21 +74,6 @@ mid_table = np.array(['M', 'I', 'D'])
 cdef class AlignmentStructure:
     """Wraps the result of an alignment c struct so it is accessible to Python
 
-    Attributes
-    ----------
-    optimal_alignment_score
-    suboptimal_alignment_score
-    target_begin
-    target_end_optimal
-    target_end_suboptimal
-    query_begin
-    query_end
-    cigar
-    query_sequence
-    target_sequence
-    aligned_query_sequence
-    aligned_target_sequence
-
     Notes
     -----
     `cigar` may be empty depending on parameters used.
@@ -521,7 +506,7 @@ cdef class StripedSmithWaterman:
     Notes
     -----
     This is a wrapper for the SSW package [1]_.
-    
+
     `mask_length` has to be >= 15, otherwise the suboptimal alignment
     information will NOT be returned.
 
@@ -532,7 +517,7 @@ cdef class StripedSmithWaterman:
     nucleotide sequences.
 
     A substitution matrix must be provided when working with protein sequences.
-    
+
     References
     ----------
     .. [1] Zhao, Mengyao, Wan-Ping Lee, Erik P. Garrison, & Gabor T.
@@ -726,79 +711,3 @@ cdef class StripedSmithWaterman:
                 py_list_matrix[i] = dict2d[row][column]
                 i += 1
         return py_list_matrix
-
-
-def local_pairwise_align_ssw(sequence1, sequence2,
-                             **kwargs):
-    """Align query and target sequences with Striped Smith-Waterman.
-
-    Parameters
-    ----------
-    sequence1 : str or BiologicalSequence
-        The first unaligned sequence
-    sequence2 : str or BiologicalSequence
-        The second unaligned sequence
-
-    Returns
-    -------
-    ``skbio.alignment.Alignment``
-        The resulting alignment as an Alignment object
-
-    Notes
-    -----
-    This is a wrapper for the SSW package [1]_.
-
-    For a complete list of optional keyword-arguments that can be provided,
-    see ``skbio.alignment.StripedSmithWaterman``.
-
-    The following kwargs will not have any effect: `suppress_sequences` and
-    `zero_index`
-
-    If an alignment does not meet a provided filter, `None` will be returned.
-    
-    References
-    ----------
-    .. [1] Zhao, Mengyao, Wan-Ping Lee, Erik P. Garrison, & Gabor T.
-       Marth. "SSW Library: An SIMD Smith-Waterman C/C++ Library for
-       Applications". PLOS ONE (2013). Web. 11 July 2014.
-       http://www.plosone.org/article/info:doi/10.1371/journal.pone.0082138
-     
-    See Also
-    --------
-    skbio.alignment.StripedSmithWaterman
-
-    """
-    # We need the sequences for `Alignment` to make sense, so don't let the
-    # user suppress them.
-    kwargs['suppress_sequences'] = False
-    kwargs['zero_index'] = True
-
-    if isinstance(sequence1, ProteinSequence):
-        kwargs['protein'] = True
-
-    query = StripedSmithWaterman(str(sequence1), **kwargs)
-    alignment = query(str(sequence2))
-
-    # If there is no cigar, then it has failed a filter. Return None.
-    if not alignment.cigar:
-        return None
-
-    start_end = None
-    if alignment.query_begin != -1:
-        start_end = [
-            (alignment.query_begin, alignment.query_end),
-            (alignment.target_begin, alignment.target_end_optimal)
-        ]
-    if kwargs.get('protein', False):
-        seqs = [
-            ProteinSequence(alignment.aligned_query_sequence, id='query'),
-            ProteinSequence(alignment.aligned_target_sequence, id='target')
-        ]
-    else:
-        seqs = [
-            NucleotideSequence(alignment.aligned_query_sequence, id='query'),
-            NucleotideSequence(alignment.aligned_target_sequence, id='target')
-        ]
-
-    return Alignment(seqs, score=alignment.optimal_alignment_score,
-                     start_end_positions=start_end)
diff --git a/skbio/alignment/tests/__init__.py b/skbio/alignment/tests/__init__.py
index 0bf0c55..3fe3dc6 100644
--- a/skbio/alignment/tests/__init__.py
+++ b/skbio/alignment/tests/__init__.py
@@ -5,3 +5,5 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
diff --git a/skbio/alignment/tests/test_alignment.py b/skbio/alignment/tests/test_alignment.py
index d4b4a30..80638d5 100644
--- a/skbio/alignment/tests/test_alignment.py
+++ b/skbio/alignment/tests/test_alignment.py
@@ -8,58 +8,39 @@
 
 from __future__ import absolute_import, division, print_function
 
+import six
+
 from unittest import TestCase, main
-from collections import Counter, defaultdict, OrderedDict
-try:
-    from StringIO import StringIO
-except ImportError:  # python3 system
-    from io import StringIO
-import tempfile
+from collections import Counter, defaultdict
 
 import numpy as np
-import numpy.testing as npt
 from scipy.spatial.distance import hamming
 
-from skbio import (NucleotideSequence, DNASequence, RNASequence, DNA, RNA,
+from skbio import (Sequence, DNA, RNA,
                    DistanceMatrix, Alignment, SequenceCollection)
-from skbio.alignment import (StockholmAlignment, SequenceCollectionError,
-                             StockholmParseError, AlignmentError)
+from skbio.alignment import (SequenceCollectionError, AlignmentError)
 
 
 class SequenceCollectionTests(TestCase):
     def setUp(self):
-        self.d1 = DNASequence('GATTACA', id="d1")
-        self.d2 = DNASequence('TTG', id="d2")
-        self.d3 = DNASequence('GTATACA', id="d3")
-        self.d1_lower = DNASequence('gattaca', id="d1")
-        self.d2_lower = DNASequence('ttg', id="d2")
-        self.d3_lower = DNASequence('gtataca', id="d3")
-        self.r1 = RNASequence('GAUUACA', id="r1")
-        self.r2 = RNASequence('UUG', id="r2")
-        self.r3 = RNASequence('U-----UGCC--', id="r3")
-
-        self.i1 = DNASequence('GATXACA', id="i1")
+        self.d1 = DNA('GATTACA', metadata={'id': "d1"})
+        self.d2 = DNA('TTG', metadata={'id': "d2"})
+        self.d3 = DNA('GTATACA', metadata={'id': "d3"})
+        self.r1 = RNA('GAUUACA', metadata={'id': "r1"})
+        self.r2 = RNA('UUG', metadata={'id': "r2"})
+        self.r3 = RNA('U-----UGCC--', metadata={'id': "r3"})
 
         self.seqs1 = [self.d1, self.d2]
-        self.seqs1_lower = [self.d1_lower, self.d2_lower]
         self.seqs2 = [self.r1, self.r2, self.r3]
         self.seqs3 = self.seqs1 + self.seqs2
         self.seqs4 = [self.d1, self.d3]
 
-        self.seqs1_t = [('d1', 'GATTACA'), ('d2', 'TTG')]
-        self.seqs2_t = [('r1', 'GAUUACA'), ('r2', 'UUG'),
-                        ('r3', 'U-----UGCC--')]
-        self.seqs3_t = self.seqs1_t + self.seqs2_t
-
         self.s1 = SequenceCollection(self.seqs1)
-        self.s1_lower = SequenceCollection(self.seqs1_lower)
         self.s2 = SequenceCollection(self.seqs2)
         self.s3 = SequenceCollection(self.seqs3)
         self.s4 = SequenceCollection(self.seqs4)
         self.empty = SequenceCollection([])
 
-        self.invalid_s1 = SequenceCollection([self.i1])
-
     def test_init(self):
         SequenceCollection(self.seqs1)
         SequenceCollection(self.seqs2)
@@ -71,17 +52,12 @@ class SequenceCollectionTests(TestCase):
         s1 = [self.d1, self.d1]
         self.assertRaises(SequenceCollectionError, SequenceCollection, s1)
 
-    def test_init_validate(self):
-        SequenceCollection(self.seqs1, validate=True)
-        SequenceCollection(self.seqs1, validate=True)
-        # can't validate self.seqs2 as a DNASequence
-        self.assertRaises(SequenceCollectionError, SequenceCollection,
-                          self.invalid_s1, validate=True)
-
-    def test_from_fasta_records(self):
-        SequenceCollection.from_fasta_records(self.seqs1_t, DNASequence)
-        SequenceCollection.from_fasta_records(self.seqs2_t, RNASequence)
-        SequenceCollection.from_fasta_records(self.seqs3_t, NucleotideSequence)
+    def test_init_fail_no_id(self):
+        seq = Sequence('ACGTACGT')
+        with six.assertRaisesRegex(self, SequenceCollectionError,
+                                   "'id' must be included in the sequence "
+                                   "metadata"):
+            SequenceCollection([seq])
 
     def test_contains(self):
         self.assertTrue('d1' in self.s1)
@@ -172,7 +148,13 @@ class SequenceCollectionTests(TestCase):
         self.assertEqual(count, len(self.seqs1))
         self.assertRaises(StopIteration, lambda: next(s1_iter))
 
-    def test_k_word_frequencies(self):
+    def test_kmer_frequencies(self):
+        expected1 = Counter({'GAT': 1, 'TAC': 1})
+        expected2 = Counter({'TTG': 1})
+        self.assertEqual(
+            self.s1.kmer_frequencies(k=3, overlap=False, relative=False),
+            [expected1, expected2])
+
         expected1 = defaultdict(float)
         expected1['A'] = 3 / 7.
         expected1['C'] = 1 / 7.
@@ -181,7 +163,7 @@ class SequenceCollectionTests(TestCase):
         expected2 = defaultdict(float)
         expected2['G'] = 1 / 3.
         expected2['T'] = 2 / 3.
-        self.assertEqual(self.s1.k_word_frequencies(k=1),
+        self.assertEqual(self.s1.kmer_frequencies(k=1, relative=True),
                          [expected1, expected2])
 
         expected1 = defaultdict(float)
@@ -189,16 +171,17 @@ class SequenceCollectionTests(TestCase):
         expected1['TAC'] = 1 / 2.
         expected2 = defaultdict(float)
         expected2['TTG'] = 1 / 1.
-        self.assertEqual(self.s1.k_word_frequencies(k=3, overlapping=False),
-                         [expected1, expected2])
+        self.assertEqual(
+            self.s1.kmer_frequencies(k=3, overlap=False, relative=True),
+            [expected1, expected2])
 
-        self.assertEqual(self.empty.k_word_frequencies(k=1), [])
+        self.assertEqual(self.empty.kmer_frequencies(k=1, relative=True), [])
 
         # Test to ensure floating point precision bug isn't present. See the
-        # tests for BiologicalSequence.k_word_frequencies for more details.
-        sc = SequenceCollection([RNA('C' * 10, id='s1'),
-                                 RNA('G' * 10, id='s2')])
-        self.assertEqual(sc.k_word_frequencies(1),
+        # tests for Sequence.kmer_frequencies for more details.
+        sc = SequenceCollection([RNA('C' * 10, metadata={'id': 's1'}),
+                                 RNA('G' * 10, metadata={'id': 's2'})])
+        self.assertEqual(sc.kmer_frequencies(1, relative=True),
                          [defaultdict(float, {'C': 1.0}),
                           defaultdict(float, {'G': 1.0})])
 
@@ -211,11 +194,15 @@ class SequenceCollectionTests(TestCase):
         self.assertEqual(str(self.empty), exp4)
 
     def test_distances(self):
-        s1 = SequenceCollection([DNA("ACGT", "d1"), DNA("ACGG", "d2")])
+        s1 = SequenceCollection([DNA("ACGT", metadata={'id': "d1"}),
+                                 DNA("ACGG", metadata={'id': "d2"})])
         expected = [[0, 0.25],
                     [0.25, 0]]
         expected = DistanceMatrix(expected, ['d1', 'd2'])
-        actual = s1.distances(hamming)
+
+        def h(s1, s2):
+            return hamming(s1.values, s2.values)
+        actual = s1.distances(h)
         self.assertEqual(actual, expected)
 
         # alt distance function provided
@@ -249,9 +236,10 @@ class SequenceCollectionTests(TestCase):
         self.assertEqual(actual4[2], 0.0)
 
     def test_degap(self):
-        expected = [(id_, seq.replace('.', '').replace('-', ''))
-                    for id_, seq in self.seqs2_t]
-        expected = SequenceCollection.from_fasta_records(expected, RNASequence)
+        expected = SequenceCollection([
+            RNA('GAUUACA', metadata={'id': "r1"}),
+            RNA('UUG', metadata={'id': "r2"}),
+            RNA('UUGCC', metadata={'id': "r3"})])
         actual = self.s2.degap()
         self.assertEqual(actual, expected)
 
@@ -266,151 +254,137 @@ class SequenceCollectionTests(TestCase):
                          ['d1', 'd2', 'r1', 'r2', 'r3'])
         self.assertEqual(self.empty.ids(), [])
 
-    def _assert_sequence_collections_equal(self, observed, expected):
-        """Compare SequenceCollections strictly."""
-        # TODO remove this custom equality testing code when SequenceCollection
-        # has an equals method (part of #656). We need this method to include
-        # IDs in the comparison (not part of SequenceCollection.__eq__).
-        self.assertEqual(observed, expected)
-        for obs_seq, exp_seq in zip(observed, expected):
-            self.assertTrue(obs_seq.equals(exp_seq))
-
     def test_update_ids_default_behavior(self):
         # 3 seqs
         exp_sc = SequenceCollection([
-            RNA('GAUUACA', id="1"),
-            RNA('UUG', id="2"),
-            RNA('U-----UGCC--', id="3")
+            RNA('GAUUACA', metadata={'id': "1"}),
+            RNA('UUG', metadata={'id': "2"}),
+            RNA('U-----UGCC--', metadata={'id': "3"})
         ])
         exp_id_map = {'1': 'r1', '2': 'r2', '3': 'r3'}
         obs_sc, obs_id_map = self.s2.update_ids()
-        self._assert_sequence_collections_equal(obs_sc, exp_sc)
+        self.assertEqual(obs_sc, exp_sc)
         self.assertEqual(obs_id_map, exp_id_map)
 
         # empty
         obs_sc, obs_id_map = self.empty.update_ids()
-        self._assert_sequence_collections_equal(obs_sc, self.empty)
+        self.assertEqual(obs_sc, self.empty)
         self.assertEqual(obs_id_map, {})
 
     def test_update_ids_prefix(self):
         # 3 seqs
         exp_sc = SequenceCollection([
-            RNA('GAUUACA', id="abc1"),
-            RNA('UUG', id="abc2"),
-            RNA('U-----UGCC--', id="abc3")
+            RNA('GAUUACA', metadata={'id': "abc1"}),
+            RNA('UUG', metadata={'id': "abc2"}),
+            RNA('U-----UGCC--', metadata={'id': "abc3"})
         ])
         exp_id_map = {'abc1': 'r1', 'abc2': 'r2', 'abc3': 'r3'}
         obs_sc, obs_id_map = self.s2.update_ids(prefix='abc')
-        self._assert_sequence_collections_equal(obs_sc, exp_sc)
+        self.assertEqual(obs_sc, exp_sc)
         self.assertEqual(obs_id_map, exp_id_map)
 
         # empty
         obs_sc, obs_id_map = self.empty.update_ids(prefix='abc')
-        self._assert_sequence_collections_equal(obs_sc, self.empty)
+        self.assertEqual(obs_sc, self.empty)
         self.assertEqual(obs_id_map, {})
 
-    def test_update_ids_fn_parameter(self):
+    def test_update_ids_func_parameter(self):
         def append_42(ids):
             return [id_ + '-42' for id_ in ids]
 
         # 3 seqs
         exp_sc = SequenceCollection([
-            RNA('GAUUACA', id="r1-42"),
-            RNA('UUG', id="r2-42"),
-            RNA('U-----UGCC--', id="r3-42")
+            RNA('GAUUACA', metadata={'id': "r1-42"}),
+            RNA('UUG', metadata={'id': "r2-42"}),
+            RNA('U-----UGCC--', metadata={'id': "r3-42"})
         ])
         exp_id_map = {'r1-42': 'r1', 'r2-42': 'r2', 'r3-42': 'r3'}
-        obs_sc, obs_id_map = self.s2.update_ids(fn=append_42)
-        self._assert_sequence_collections_equal(obs_sc, exp_sc)
+        obs_sc, obs_id_map = self.s2.update_ids(func=append_42)
+        self.assertEqual(obs_sc, exp_sc)
         self.assertEqual(obs_id_map, exp_id_map)
 
         # empty
-        obs_sc, obs_id_map = self.empty.update_ids(fn=append_42)
-        self._assert_sequence_collections_equal(obs_sc, self.empty)
+        obs_sc, obs_id_map = self.empty.update_ids(func=append_42)
+        self.assertEqual(obs_sc, self.empty)
         self.assertEqual(obs_id_map, {})
 
     def test_update_ids_ids_parameter(self):
         # 3 seqs
         exp_sc = SequenceCollection([
-            RNA('GAUUACA', id="abc"),
-            RNA('UUG', id="def"),
-            RNA('U-----UGCC--', id="ghi")
+            RNA('GAUUACA', metadata={'id': "abc"}),
+            RNA('UUG', metadata={'id': "def"}),
+            RNA('U-----UGCC--', metadata={'id': "ghi"})
         ])
         exp_id_map = {'abc': 'r1', 'def': 'r2', 'ghi': 'r3'}
         obs_sc, obs_id_map = self.s2.update_ids(ids=('abc', 'def', 'ghi'))
-        self._assert_sequence_collections_equal(obs_sc, exp_sc)
+        self.assertEqual(obs_sc, exp_sc)
         self.assertEqual(obs_id_map, exp_id_map)
 
         # empty
         obs_sc, obs_id_map = self.empty.update_ids(ids=[])
-        self._assert_sequence_collections_equal(obs_sc, self.empty)
+        self.assertEqual(obs_sc, self.empty)
         self.assertEqual(obs_id_map, {})
 
     def test_update_ids_sequence_attributes_propagated(self):
         # 1 seq
         exp_sc = Alignment([
-            DNA('ACGT', id="abc", description='desc', quality=range(4))
+            DNA('ACGT', metadata={'id': "abc", 'description': 'desc'},
+                positional_metadata={'quality': range(4)})
         ])
         exp_id_map = {'abc': 'seq1'}
 
         obj = Alignment([
-            DNA('ACGT', id="seq1", description='desc', quality=range(4))
+            DNA('ACGT', metadata={'id': "seq1", 'description': 'desc'},
+                positional_metadata={'quality': range(4)})
         ])
 
         obs_sc, obs_id_map = obj.update_ids(ids=('abc',))
-        self._assert_sequence_collections_equal(obs_sc, exp_sc)
+        self.assertEqual(obs_sc, exp_sc)
         self.assertEqual(obs_id_map, exp_id_map)
 
         # 2 seqs
         exp_sc = Alignment([
-            DNA('ACGT', id="abc", description='desc1', quality=range(4)),
-            DNA('TGCA', id="def", description='desc2', quality=range(4)[::-1])
+            DNA('ACGT', metadata={'id': "abc", 'description': 'desc1'},
+                positional_metadata={'quality': range(4)}),
+            DNA('TGCA', metadata={'id': "def", 'description': 'desc2'},
+                positional_metadata={'quality': range(4)[::-1]})
         ])
         exp_id_map = {'abc': 'seq1', 'def': 'seq2'}
 
         obj = Alignment([
-            DNA('ACGT', id="seq1", description='desc1', quality=(0, 1, 2, 3)),
-            DNA('TGCA', id="seq2", description='desc2', quality=(3, 2, 1, 0))
+            DNA('ACGT', metadata={'id': "seq1", 'description': 'desc1'},
+                positional_metadata={'quality': (0, 1, 2, 3)}),
+            DNA('TGCA', metadata={'id': "seq2", 'description': 'desc2'},
+                positional_metadata={'quality': (3, 2, 1, 0)})
         ])
 
         obs_sc, obs_id_map = obj.update_ids(ids=('abc', 'def'))
-        self._assert_sequence_collections_equal(obs_sc, exp_sc)
+        self.assertEqual(obs_sc, exp_sc)
         self.assertEqual(obs_id_map, exp_id_map)
 
     def test_update_ids_invalid_parameter_combos(self):
-        with self.assertRaisesRegexp(SequenceCollectionError, 'ids and fn'):
-            self.s1.update_ids(fn=lambda e: e, ids=['foo', 'bar'])
+        with six.assertRaisesRegex(self, SequenceCollectionError,
+                                   'ids and func'):
+            self.s1.update_ids(func=lambda e: e, ids=['foo', 'bar'])
 
-        with self.assertRaisesRegexp(SequenceCollectionError, 'prefix'):
+        with six.assertRaisesRegex(self, SequenceCollectionError, 'prefix'):
             self.s1.update_ids(ids=['foo', 'bar'], prefix='abc')
 
-        with self.assertRaisesRegexp(SequenceCollectionError, 'prefix'):
-            self.s1.update_ids(fn=lambda e: e, prefix='abc')
+        with six.assertRaisesRegex(self, SequenceCollectionError, 'prefix'):
+            self.s1.update_ids(func=lambda e: e, prefix='abc')
 
     def test_update_ids_invalid_ids(self):
         # incorrect number of new ids
-        with self.assertRaisesRegexp(SequenceCollectionError, '3 != 2'):
+        with six.assertRaisesRegex(self, SequenceCollectionError, '3 != 2'):
             self.s1.update_ids(ids=['foo', 'bar', 'baz'])
-        with self.assertRaisesRegexp(SequenceCollectionError, '4 != 2'):
-            self.s1.update_ids(fn=lambda e: ['foo', 'bar', 'baz', 'abc'])
+        with six.assertRaisesRegex(self, SequenceCollectionError, '4 != 2'):
+            self.s1.update_ids(func=lambda e: ['foo', 'bar', 'baz', 'abc'])
 
         # duplicates
-        with self.assertRaisesRegexp(SequenceCollectionError, 'foo'):
+        with six.assertRaisesRegex(self, SequenceCollectionError, 'foo'):
             self.s2.update_ids(ids=['foo', 'bar', 'foo'])
-        with self.assertRaisesRegexp(SequenceCollectionError, 'bar'):
-            self.s2.update_ids(fn=lambda e: ['foo', 'bar', 'bar'])
-
-    def test_int_map(self):
-        expected1 = {"1": self.d1, "2": self.d2}
-        expected2 = {"1": "d1", "2": "d2"}
-        obs = npt.assert_warns(DeprecationWarning, self.s1.int_map)
-        self.assertEqual(obs, (expected1, expected2))
-
-        expected1 = {"h-1": self.d1, "h-2": self.d2}
-        expected2 = {"h-1": "d1", "h-2": "d2"}
-        obs = npt.assert_warns(DeprecationWarning, self.s1.int_map,
-                               prefix='h-')
-        self.assertEqual(obs, (expected1, expected2))
+        with six.assertRaisesRegex(self, SequenceCollectionError, 'bar'):
+            self.s2.update_ids(func=lambda e: ['foo', 'bar', 'bar'])
 
     def test_is_empty(self):
         self.assertFalse(self.s1.is_empty())
@@ -419,20 +393,9 @@ class SequenceCollectionTests(TestCase):
 
         self.assertTrue(self.empty.is_empty())
 
-    def test_is_valid(self):
-        self.assertTrue(self.s1.is_valid())
-        self.assertTrue(self.s2.is_valid())
-        self.assertTrue(self.s3.is_valid())
-        self.assertTrue(self.empty.is_valid())
-
-        self.assertFalse(self.invalid_s1.is_valid())
-
     def test_iteritems(self):
         self.assertEqual(list(self.s1.iteritems()),
-                         [(s.id, s) for s in self.s1])
-
-    def test_lower(self):
-        self.assertEqual(self.s1.lower(), self.s1_lower)
+                         [(s.metadata['id'], s) for s in self.s1])
 
     def test_sequence_count(self):
         self.assertEqual(self.s1.sequence_count(), 2)
@@ -446,38 +409,20 @@ class SequenceCollectionTests(TestCase):
         self.assertEqual(self.s3.sequence_lengths(), [7, 3, 7, 3, 12])
         self.assertEqual(self.empty.sequence_lengths(), [])
 
-    def test_to_fasta(self):
-        exp1 = ">d1\nGATTACA\n>d2\nTTG\n"
-        self.assertEqual(self.s1.to_fasta(), exp1)
-        exp2 = ">r1\nGAUUACA\n>r2\nUUG\n>r3\nU-----UGCC--\n"
-        self.assertEqual(self.s2.to_fasta(), exp2)
-
-    def test_toFasta(self):
-        exp = ">d1\nGATTACA\n>d2\nTTG\n"
-        obs = npt.assert_warns(DeprecationWarning, self.s1.toFasta)
-        self.assertEqual(obs, exp)
-
-    def test_upper(self):
-        self.assertEqual(self.s1_lower.upper(), self.s1)
-
 
 class AlignmentTests(TestCase):
 
     def setUp(self):
-        self.d1 = DNASequence('..ACC-GTTGG..', id="d1")
-        self.d2 = DNASequence('TTACCGGT-GGCC', id="d2")
-        self.d3 = DNASequence('.-ACC-GTTGC--', id="d3")
+        self.d1 = DNA('..ACC-GTTGG..', metadata={'id': "d1"})
+        self.d2 = DNA('TTACCGGT-GGCC', metadata={'id': "d2"})
+        self.d3 = DNA('.-ACC-GTTGC--', metadata={'id': "d3"})
 
-        self.r1 = RNASequence('UUAU-', id="r1")
-        self.r2 = RNASequence('ACGUU', id="r2")
+        self.r1 = RNA('UUAU-', metadata={'id': "r1"})
+        self.r2 = RNA('ACGUU', metadata={'id': "r2"})
 
         self.seqs1 = [self.d1, self.d2, self.d3]
         self.seqs2 = [self.r1, self.r2]
 
-        self.seqs1_t = [('d1', '..ACC-GTTGG..'), ('d2', 'TTACCGGT-GGCC'),
-                        ('d3', '.-ACC-GTTGC--')]
-        self.seqs2_t = [('r1', 'UUAU-'), ('r2', 'ACGUU')]
-
         self.a1 = Alignment(self.seqs1)
         self.a2 = Alignment(self.seqs2)
         self.a3 = Alignment(self.seqs2, score=42.0,
@@ -489,18 +434,20 @@ class AlignmentTests(TestCase):
         self.empty = Alignment([])
 
         # sequences, but no positions
-        self.no_positions = Alignment([RNA('', id='a'), RNA('', id='b')])
+        self.no_positions = Alignment([RNA('', metadata={'id': 'a'}),
+                                       RNA('', metadata={'id': 'b'})])
 
     def test_degap(self):
-        expected = [(id_, seq.replace('.', '').replace('-', ''))
-                    for id_, seq in self.seqs1_t]
-        expected = SequenceCollection.from_fasta_records(expected, DNASequence)
+        expected = SequenceCollection([
+            DNA('ACCGTTGG', metadata={'id': "d1"}),
+            DNA('TTACCGGTGGCC', metadata={'id': "d2"}),
+            DNA('ACCGTTGC', metadata={'id': "d3"})])
         actual = self.a1.degap()
         self.assertEqual(actual, expected)
 
-        expected = [(id_, seq.replace('.', '').replace('-', ''))
-                    for id_, seq in self.seqs2_t]
-        expected = SequenceCollection.from_fasta_records(expected, RNASequence)
+        expected = SequenceCollection([
+            RNA('UUAU', metadata={'id': "r1"}),
+            RNA('ACGUU', metadata={'id': "r2"})])
         actual = self.a2.degap()
         self.assertEqual(actual, expected)
 
@@ -555,26 +502,26 @@ class AlignmentTests(TestCase):
 
         # keep positions
         actual = self.a1.subalignment(positions_to_keep=[0, 2, 3])
-        d1 = DNASequence('.AC', id="d1")
-        d2 = DNASequence('TAC', id="d2")
-        d3 = DNASequence('.AC', id="d3")
+        d1 = DNA('.AC', metadata={'id': "d1"})
+        d2 = DNA('TAC', metadata={'id': "d2"})
+        d3 = DNA('.AC', metadata={'id': "d3"})
         expected = Alignment([d1, d2, d3])
         self.assertEqual(actual, expected)
 
         # keep positions (invert)
         actual = self.a1.subalignment(positions_to_keep=[0, 2, 3],
                                       invert_positions_to_keep=True)
-        d1 = DNASequence('.C-GTTGG..', id="d1")
-        d2 = DNASequence('TCGGT-GGCC', id="d2")
-        d3 = DNASequence('-C-GTTGC--', id="d3")
+        d1 = DNA('.C-GTTGG..', metadata={'id': "d1"})
+        d2 = DNA('TCGGT-GGCC', metadata={'id': "d2"})
+        d3 = DNA('-C-GTTGC--', metadata={'id': "d3"})
         expected = Alignment([d1, d2, d3])
         self.assertEqual(actual, expected)
 
         # keep seqs and positions
         actual = self.a1.subalignment(seqs_to_keep=[0, 2],
                                       positions_to_keep=[0, 2, 3])
-        d1 = DNASequence('.AC', id="d1")
-        d3 = DNASequence('.AC', id="d3")
+        d1 = DNA('.AC', metadata={'id': "d1"})
+        d3 = DNA('.AC', metadata={'id': "d3"})
         expected = Alignment([d1, d3])
         self.assertEqual(actual, expected)
 
@@ -583,7 +530,7 @@ class AlignmentTests(TestCase):
                                       positions_to_keep=[0, 2, 3],
                                       invert_seqs_to_keep=True,
                                       invert_positions_to_keep=True)
-        d2 = DNASequence('TCGGT-GGCC', id="d2")
+        d2 = DNA('TCGGT-GGCC', metadata={'id': "d2"})
         expected = Alignment([d2])
         self.assertEqual(actual, expected)
 
@@ -601,7 +548,7 @@ class AlignmentTests(TestCase):
 
     def test_init_not_equal_lengths(self):
         invalid_seqs = [self.d1, self.d2, self.d3,
-                        DNASequence('.-ACC-GTGC--', id="i2")]
+                        DNA('.-ACC-GTGC--', metadata={'id': "i2"})]
         self.assertRaises(AlignmentError, Alignment,
                           invalid_seqs)
 
@@ -609,20 +556,15 @@ class AlignmentTests(TestCase):
         seqs = [self.d1, self.d2, self.d3]
         Alignment(seqs)
 
-    def test_init_validate(self):
-        Alignment(self.seqs1, validate=True)
-
-        # invalid DNA character
-        invalid_seqs1 = [self.d1, self.d2, self.d3,
-                         DNASequence('.-ACC-GTXGC--', id="i1")]
-        self.assertRaises(SequenceCollectionError, Alignment,
-                          invalid_seqs1, validate=True)
-
     def test_iter_positions(self):
         actual = list(self.a2.iter_positions())
-        expected = [[RNASequence(j) for j in i] for i in
-                    ['UA', 'UC', 'AG', 'UU', '-U']]
-        self.seqs2_t = [('r1', 'UUAU-'), ('r2', 'ACGUU')]
+        expected = [
+            [RNA('U', metadata={'id': 'r1'}), RNA('A', metadata={'id': 'r2'})],
+            [RNA('U', metadata={'id': 'r1'}), RNA('C', metadata={'id': 'r2'})],
+            [RNA('A', metadata={'id': 'r1'}), RNA('G', metadata={'id': 'r2'})],
+            [RNA('U', metadata={'id': 'r1'}), RNA('U', metadata={'id': 'r2'})],
+            [RNA('-', metadata={'id': 'r1'}), RNA('U', metadata={'id': 'r2'})]
+        ]
         self.assertEqual(actual, expected)
 
         actual = list(self.a2.iter_positions(constructor=str))
@@ -631,46 +573,46 @@ class AlignmentTests(TestCase):
                     list('AG'),
                     list('UU'),
                     list('-U')]
-        self.seqs2_t = [('r1', 'UUAU-'), ('r2', 'ACGUU')]
         self.assertEqual(actual, expected)
 
     def test_majority_consensus(self):
-        d1 = DNASequence('TTT', id="d1")
-        d2 = DNASequence('TT-', id="d2")
-        d3 = DNASequence('TC-', id="d3")
+        # empty cases
+        self.assertEqual(
+            self.empty.majority_consensus(), Sequence(''))
+        self.assertEqual(
+            self.no_positions.majority_consensus(), RNA(''))
+
+        # alignment where all sequences are the same
+        aln = Alignment([DNA('AG', metadata={'id': 'a'}),
+                         DNA('AG', metadata={'id': 'b'})])
+        self.assertEqual(aln.majority_consensus(), DNA('AG'))
+
+        # no ties
+        d1 = DNA('TTT', metadata={'id': "d1"})
+        d2 = DNA('TT-', metadata={'id': "d2"})
+        d3 = DNA('TC-', metadata={'id': "d3"})
         a1 = Alignment([d1, d2, d3])
-        self.assertTrue(a1.majority_consensus().equals(DNASequence('TT-')))
+        self.assertEqual(a1.majority_consensus(), DNA('TT-'))
 
-        d1 = DNASequence('T', id="d1")
-        d2 = DNASequence('A', id="d2")
+        # ties
+        d1 = DNA('T', metadata={'id': "d1"})
+        d2 = DNA('A', metadata={'id': "d2"})
         a1 = Alignment([d1, d2])
         self.assertTrue(a1.majority_consensus() in
-                        [DNASequence('T'), DNASequence('A')])
-
-        self.assertEqual(self.empty.majority_consensus(), '')
-
-    def test_majority_consensus_constructor(self):
-        d1 = DNASequence('TTT', id="d1")
-        d2 = DNASequence('TT-', id="d2")
-        d3 = DNASequence('TC-', id="d3")
-        a1 = Alignment([d1, d2, d3])
-
-        obs = npt.assert_warns(DeprecationWarning, a1.majority_consensus,
-                               constructor=str)
-        self.assertEqual(obs, 'TT-')
+                        [DNA('T'), DNA('A')])
 
     def test_omit_gap_positions(self):
         expected = self.a2
         self.assertEqual(self.a2.omit_gap_positions(1.0), expected)
         self.assertEqual(self.a2.omit_gap_positions(0.51), expected)
 
-        r1 = RNASequence('UUAU', id="r1")
-        r2 = RNASequence('ACGU', id="r2")
+        r1 = RNA('UUAU', metadata={'id': "r1"})
+        r2 = RNA('ACGU', metadata={'id': "r2"})
         expected = Alignment([r1, r2])
         self.assertEqual(self.a2.omit_gap_positions(0.49), expected)
 
-        r1 = RNASequence('UUAU', id="r1")
-        r2 = RNASequence('ACGU', id="r2")
+        r1 = RNA('UUAU', metadata={'id': "r1"})
+        r2 = RNA('ACGU', metadata={'id': "r2"})
         expected = Alignment([r1, r2])
         self.assertEqual(self.a2.omit_gap_positions(0.0), expected)
 
@@ -682,10 +624,11 @@ class AlignmentTests(TestCase):
         # tests for Alignment.position_frequencies for more details.
         seqs = []
         for i in range(33):
-            seqs.append(DNA('-.', id=str(i)))
+            seqs.append(DNA('-.', metadata={'id': str(i)}))
         aln = Alignment(seqs)
         self.assertEqual(aln.omit_gap_positions(1 - np.finfo(float).eps),
-                         Alignment([DNA('', id=str(i)) for i in range(33)]))
+                         Alignment([DNA('', metadata={'id': str(i)})
+                                    for i in range(33)]))
 
     def test_omit_gap_sequences(self):
         expected = self.a2
@@ -701,7 +644,8 @@ class AlignmentTests(TestCase):
 
         # Test to ensure floating point precision bug isn't present. See the
         # tests for Alignment.position_frequencies for more details.
-        aln = Alignment([DNA('.' * 33, id='abc'), DNA('-' * 33, id='def')])
+        aln = Alignment([DNA('.' * 33, metadata={'id': 'abc'}),
+                         DNA('-' * 33, metadata={'id': 'def'})])
         self.assertEqual(aln.omit_gap_sequences(1 - np.finfo(float).eps),
                          Alignment([]))
 
@@ -745,7 +689,7 @@ class AlignmentTests(TestCase):
         # cannot be represented exactly as a floating point number.
         seqs = []
         for i in range(10):
-            seqs.append(DNA('A', id=str(i)))
+            seqs.append(DNA('A', metadata={'id': str(i)}))
         aln = Alignment(seqs)
         self.assertEqual(aln.position_frequencies(),
                          [defaultdict(float, {'A': 1.0})])
@@ -764,11 +708,11 @@ class AlignmentTests(TestCase):
         np.testing.assert_almost_equal(self.empty.position_entropies(base=2),
                                        [])
 
-    def test_k_word_frequencies(self):
+    def test_kmer_frequencies(self):
         expected = [defaultdict(float, {'U': 3 / 5, 'A': 1 / 5, '-': 1 / 5}),
                     defaultdict(float, {'A': 1 / 5, 'C': 1 / 5, 'G': 1 / 5,
                                         'U': 2 / 5})]
-        actual = self.a2.k_word_frequencies(k=1)
+        actual = self.a2.kmer_frequencies(k=1, relative=True)
         for a, e in zip(actual, expected):
             self.assertEqual(sorted(a), sorted(e), 5)
             np.testing.assert_almost_equal(sorted(a.values()),
@@ -779,350 +723,13 @@ class AlignmentTests(TestCase):
         self.assertEqual(self.a2.sequence_length(), 5)
         self.assertEqual(self.empty.sequence_length(), 0)
 
-    def test_to_phylip(self):
-        d1 = DNASequence('..ACC-GTTGG..', id="d1")
-        d2 = DNASequence('TTACCGGT-GGCC', id="d2")
-        d3 = DNASequence('.-ACC-GTTGC--', id="d3")
-        a = Alignment([d1, d2, d3])
-
-        phylip_str, id_map = npt.assert_warns(DeprecationWarning, a.to_phylip,
-                                              map_labels=False)
-        self.assertEqual(id_map, {'d1': 'd1',
-                                  'd3': 'd3',
-                                  'd2': 'd2'})
-        expected = "\n".join(["3 13",
-                              "d1 ..ACC-GTTGG..",
-                              "d2 TTACCGGT-GGCC",
-                              "d3 .-ACC-GTTGC--"])
-        self.assertEqual(phylip_str, expected)
-
-    def test_to_phylip_map_labels(self):
-        d1 = DNASequence('..ACC-GTTGG..', id="d1")
-        d2 = DNASequence('TTACCGGT-GGCC', id="d2")
-        d3 = DNASequence('.-ACC-GTTGC--', id="d3")
-        a = Alignment([d1, d2, d3])
-
-        phylip_str, id_map = npt.assert_warns(DeprecationWarning, a.to_phylip,
-                                              map_labels=True,
-                                              label_prefix="s")
-        self.assertEqual(id_map, {'s1': 'd1',
-                                  's3': 'd3',
-                                  's2': 'd2'})
-        expected = "\n".join(["3 13",
-                              "s1 ..ACC-GTTGG..",
-                              "s2 TTACCGGT-GGCC",
-                              "s3 .-ACC-GTTGC--"])
-        self.assertEqual(phylip_str, expected)
-
-    def test_to_phylip_no_sequences(self):
-        with self.assertRaises(SequenceCollectionError):
-            npt.assert_warns(DeprecationWarning, Alignment([]).to_phylip)
-
-    def test_to_phylip_no_positions(self):
-        d1 = DNASequence('', id="d1")
-        d2 = DNASequence('', id="d2")
-        a = Alignment([d1, d2])
-
-        with self.assertRaises(SequenceCollectionError):
-            npt.assert_warns(DeprecationWarning, a.to_phylip)
-
     def test_validate_lengths(self):
         self.assertTrue(self.a1._validate_lengths())
         self.assertTrue(self.a2._validate_lengths())
         self.assertTrue(self.empty._validate_lengths())
 
         self.assertTrue(Alignment([
-            DNASequence('TTT', id="d1")])._validate_lengths())
-
-
-class StockholmAlignmentTests(TestCase):
-    def setUp(self):
-        self.seqs = [DNASequence("ACC-G-GGTA", id="seq1"),
-                     DNASequence("TCC-G-GGCA", id="seq2")]
-        self.GF = OrderedDict([
-            ("AC", "RF00360"),
-            ("BM", ["cmbuild  -F CM SEED",
-                    "cmsearch  -Z 274931 -E 1000000"]),
-            ("SQ", "9"),
-            ("RT", ["TITLE1",  "TITLE2"]),
-            ("RN", ["[1]", "[2]"]),
-            ("RA", ["Auth1;", "Auth2;"]),
-            ("RL", ["J Mol Biol", "Cell"]),
-            ("RM", ["11469857", "12007400"]),
-            ('RN', ['[1]', '[2]'])
-        ])
-        self.GS = {"AC": OrderedDict([("seq1", "111"), ("seq2", "222")])}
-        self.GR = {"SS": OrderedDict([("seq1", "1110101111"),
-                                      ("seq2", "0110101110")])}
-        self.GC = {"SS_cons": "(((....)))"}
-        self.st = StockholmAlignment(self.seqs, gc=self.GC, gf=self.GF,
-                                     gs=self.GS, gr=self.GR)
-
-    def test_retrieve_metadata(self):
-        self.assertEqual(self.st.gc, self.GC)
-        self.assertEqual(self.st.gf, self.GF)
-        self.assertEqual(self.st.gs, self.GS)
-        self.assertEqual(self.st.gr, self.GR)
-
-    def test_from_file_alignment(self):
-        # test that a basic stockholm file with interleaved alignment can be
-        # parsed
-        sto = StringIO("# STOCKHOLM 1.0\n"
-                       "seq1      ACC-G\n"
-                       "seq2      TCC-G\n\n"
-                       "seq1      -GGTA\n"
-                       "seq2      -GGCA\n//")
-        obs_sto = next(StockholmAlignment.from_file(sto, DNA))
-        exp_sto = StockholmAlignment(self.seqs)
-        self.assertEqual(obs_sto, exp_sto)
-
-    def test_from_file_GF(self):
-        # remove rn line to make sure auto-added
-        self.GF.pop("RN")
-        sto = StringIO("# STOCKHOLM 1.0\n#=GF RN [1]\n#=GF RM 11469857\n"
-                       "#=GF RT TITLE1\n#=GF RA Auth1;\n#=GF RL J Mol Biol\n"
-                       "#=GF RN [2]\n#=GF RM 12007400\n#=GF RT TITLE2\n"
-                       "#=GF RA Auth2;\n#=GF RL Cell\n#=GF AC RF00360\n"
-                       "#=GF BM cmbuild  -F CM SEED\n"
-                       "#=GF BM cmsearch  -Z 274931 -E 1000000\n#=GF SQ 9\n"
-                       "seq1         ACC-G-GGTA\nseq2         TCC-G-GGCA\n//")
-        obs_sto = next(StockholmAlignment.from_file(sto, DNA))
-        exp_sto = StockholmAlignment(self.seqs, self.GF, {}, {}, {})
-        self.assertEqual(obs_sto, exp_sto)
-
-    def test_from_file_GC(self):
-        sto = StringIO("# STOCKHOLM 1.0\n"
-                       "seq1         ACC-G-GGTA\nseq2         TCC-G-GGCA\n"
-                       "#=GC SS_cons (((....)))\n//")
-        obs_sto = next(StockholmAlignment.from_file(sto, DNA))
-        exp_sto = StockholmAlignment(self.seqs, {}, {}, {}, self.GC)
-        self.assertEqual(obs_sto, exp_sto)
-
-    def test_from_file_GS(self):
-        sto = StringIO("# STOCKHOLM 1.0\n#=GS seq2 AC 222\n#=GS seq1 AC 111\n"
-                       "seq1          ACC-G-GGTA\n"
-                       "seq2          TCC-G-GGCA\n//")
-        obs_sto = next(StockholmAlignment.from_file(sto, DNA))
-        exp_sto = StockholmAlignment(self.seqs, {}, self.GS, {}, {})
-        self.assertEqual(obs_sto, exp_sto)
-
-    def test_from_file_GR(self):
-        sto = StringIO("# STOCKHOLM 1.0\nseq1          ACC-G\n"
-                       "#=GR seq1 SS  11101\nseq2          TCC-G\n"
-                       "#=GR seq2 SS  01101\n\nseq1          -GGTA\n"
-                       "#=GR seq1 SS  01111\nseq2          -GGCA\n"
-                       "#=GR seq2 SS  01110\n//")
-        obs_sto = next(StockholmAlignment.from_file(sto, DNA))
-        exp_sto = StockholmAlignment(self.seqs, {}, {}, self.GR, {})
-        self.assertEqual(obs_sto, exp_sto)
-
-    def test_from_file_multi(self):
-        sto = StringIO("# STOCKHOLM 1.0\n#=GS seq2 AC 222\n#=GS seq1 AC 111\n"
-                       "seq1          ACC-G-GGTA\n"
-                       "seq2          TCC-G-GGCA\n//\n"
-                       "# STOCKHOLM 1.0\nseq1          ACC-G-GGTA\n"
-                       "#=GR seq1 SS  1110101111\nseq2          TCC-G-GGCA\n"
-                       "#=GR seq2 SS  0110101110\n//")
-        obs_sto = StockholmAlignment.from_file(sto, DNA)
-        count = 0
-        for obs in obs_sto:
-            if count == 0:
-                exp_sto = StockholmAlignment(self.seqs, {}, self.GS, {}, {})
-                self.assertEqual(obs, exp_sto)
-            elif count == 1:
-                exp_sto = StockholmAlignment(self.seqs, {}, {}, self.GR, {})
-                self.assertEqual(obs, exp_sto)
-            else:
-                raise AssertionError("More than 2 sto alignments parsed!")
-            count += 1
-
-    def test_parse_gf_multiline_nh(self):
-        sto = ["#=GF TN MULTILINE TREE",
-               "#=GF NH THIS IS FIRST", "#=GF NH THIS IS SECOND",
-               "#=GF AC 1283394"]
-        exp = {'TN': 'MULTILINE TREE',
-               'NH': 'THIS IS FIRST THIS IS SECOND',
-               'AC': '1283394'}
-        self.assertEqual(self.st._parse_gf_info(sto), exp)
-
-    def test_parse_gf_multiline_cc(self):
-        sto = ["#=GF CC THIS IS FIRST", "#=GF CC THIS IS SECOND"]
-        exp = {'CC': 'THIS IS FIRST THIS IS SECOND'}
-        self.assertEqual(self.st._parse_gf_info(sto), exp)
-
-    def test_parse_gf_info_nongf(self):
-        sto = ["#=GF AC BLAAAAAAAHHH", "#=GC HUH THIS SHOULD NOT BE HERE"]
-        with self.assertRaises(StockholmParseError):
-            self.st._parse_gf_info(sto)
-
-    def test_parse_gf_info_malformed(self):
-        # too short of a line
-        sto = ["#=GF AC", "#=GF"]
-        with self.assertRaises(StockholmParseError):
-            self.st._parse_gf_info(sto)
-
-    def test_parse_gc_info_nongf(self):
-        sto = ["#=GC AC BLAAAAAAAHHH", "#=GF HUH THIS SHOULD NOT BE HERE"]
-        with self.assertRaises(StockholmParseError):
-            self.st._parse_gf_info(sto)
-
-    def test_parse_gc_info_strict_len(self):
-        sto = ["#=GC SS_cons (((..)))"]
-        with self.assertRaises(StockholmParseError):
-            self.st._parse_gc_info(sto, seqlen=20, strict=True)
-
-    def test_parse_gc_info_strict_duplicate(self):
-        sto = ["#=GC SS_cons (((..)))", "#=GC SS_cons (((..)))"]
-        with self.assertRaises(StockholmParseError):
-            self.st._parse_gc_info(sto, seqlen=8, strict=True)
-
-    def test_parse_gc_info_malformed(self):
-        # too short of a line
-        sto = ["#=GC AC BLAAAAAAAHHH", "#=GC"]
-        with self.assertRaises(StockholmParseError):
-            self.st._parse_gc_info(sto)
-
-    def test_parse_gs_gr_info_mixed(self):
-        sto = ["#=GS seq1 AC BLAAA", "#=GR seq2 HUH THIS SHOULD NOT BE HERE"]
-        with self.assertRaises(StockholmParseError):
-            self.st._parse_gs_gr_info(sto)
-
-    def test_parse_gs_gr_info_malformed(self):
-        # too short of a line
-        sto = ["#=GS AC BLAAAAAAAHHH", "#=GS"]
-        with self.assertRaises(StockholmParseError):
-            self.st._parse_gs_gr_info(sto)
-
-    def test_parse_gs_gr_info_strict(self):
-        sto = ["#=GR seq1 SS  10101111", "#=GR seq2 SS  01101"]
-        with self.assertRaises(StockholmParseError):
-            self.st._parse_gs_gr_info(sto, seqlen=20, strict=True)
-
-    def test_str(self):
-        st = StockholmAlignment(self.seqs, gc=self.GC, gf=self.GF, gs=self.GS,
-                                gr=self.GR)
-        obs = str(st)
-        exp = ('# STOCKHOLM 1.0\n'
-               '#=GF AC RF00360\n'
-               '#=GF BM cmbuild  -F CM SEED\n'
-               '#=GF BM cmsearch  -Z 274931 -E 1000000\n'
-               '#=GF SQ 9\n'
-               '#=GF RN [1]\n'
-               '#=GF RM 11469857\n'
-               '#=GF RT TITLE1\n'
-               '#=GF RA Auth1;\n'
-               '#=GF RL J Mol Biol\n'
-               '#=GF RN [2]\n'
-               '#=GF RM 12007400\n'
-               '#=GF RT TITLE2\n'
-               '#=GF RA Auth2;\n'
-               '#=GF RL Cell\n'
-               '#=GS seq1 AC 111\n'
-               '#=GS seq2 AC 222\n'
-               'seq1          ACC-G-GGTA\n'
-               '#=GR seq1 SS  1110101111\n'
-               'seq2          TCC-G-GGCA\n'
-               '#=GR seq2 SS  0110101110\n'
-               '#=GC SS_cons  (((....)))\n//')
-        self.assertEqual(obs, exp)
-
-    def test_to_file(self):
-        st = StockholmAlignment(self.seqs, gc=self.GC, gf=self.GF, gs=self.GS,
-                                gr=self.GR)
-
-        with tempfile.NamedTemporaryFile('r+') as temp_file:
-            st.to_file(temp_file)
-            temp_file.flush()
-            temp_file.seek(0)
-            obs = temp_file.read()
-            exp = ('# STOCKHOLM 1.0\n'
-                   '#=GF AC RF00360\n'
-                   '#=GF BM cmbuild  -F CM SEED\n'
-                   '#=GF BM cmsearch  -Z 274931 -E 1000000\n'
-                   '#=GF SQ 9\n'
-                   '#=GF RN [1]\n'
-                   '#=GF RM 11469857\n'
-                   '#=GF RT TITLE1\n'
-                   '#=GF RA Auth1;\n'
-                   '#=GF RL J Mol Biol\n'
-                   '#=GF RN [2]\n'
-                   '#=GF RM 12007400\n'
-                   '#=GF RT TITLE2\n'
-                   '#=GF RA Auth2;\n'
-                   '#=GF RL Cell\n'
-                   '#=GS seq1 AC 111\n'
-                   '#=GS seq2 AC 222\n'
-                   'seq1          ACC-G-GGTA\n'
-                   '#=GR seq1 SS  1110101111\n'
-                   'seq2          TCC-G-GGCA\n'
-                   '#=GR seq2 SS  0110101110\n'
-                   '#=GC SS_cons  (((....)))\n//')
-        self.assertEqual(obs, exp)
-
-    def test_str_gc(self):
-        st = StockholmAlignment(self.seqs, gc=self.GC, gf=None, gs=None,
-                                gr=None)
-        obs = str(st)
-        exp = ("# STOCKHOLM 1.0\nseq1          ACC-G-GGTA\n"
-               "seq2          TCC-G-GGCA\n"
-               "#=GC SS_cons  (((....)))\n//")
-        self.assertEqual(obs, exp)
-
-    def test_str_gf(self):
-        st = StockholmAlignment(self.seqs, gc=None, gf=self.GF, gs=None,
-                                gr=None)
-        obs = str(st)
-        exp = ('# STOCKHOLM 1.0\n'
-               '#=GF AC RF00360\n'
-               '#=GF BM cmbuild  -F CM SEED\n'
-               '#=GF BM cmsearch  -Z 274931 -E 1000000\n'
-               '#=GF SQ 9\n'
-               '#=GF RN [1]\n'
-               '#=GF RM 11469857\n'
-               '#=GF RT TITLE1\n'
-               '#=GF RA Auth1;\n'
-               '#=GF RL J Mol Biol\n'
-               '#=GF RN [2]\n'
-               '#=GF RM 12007400\n'
-               '#=GF RT TITLE2\n'
-               '#=GF RA Auth2;\n'
-               '#=GF RL Cell\n'
-               'seq1          ACC-G-GGTA\n'
-               'seq2          TCC-G-GGCA\n//')
-        self.assertEqual(obs, exp)
-
-    def test_str_gs(self):
-        st = StockholmAlignment(self.seqs, gc=None, gf=None, gs=self.GS,
-                                gr=None)
-        obs = str(st)
-        exp = ('# STOCKHOLM 1.0\n'
-               '#=GS seq1 AC 111\n'
-               '#=GS seq2 AC 222\n'
-               'seq1          ACC-G-GGTA\n'
-               'seq2          TCC-G-GGCA\n//')
-        self.assertEqual(obs, exp)
-
-    def test_str_gr(self):
-        st = StockholmAlignment(self.seqs, gc=None, gf=None, gs=None,
-                                gr=self.GR)
-        obs = str(st)
-        exp = ("# STOCKHOLM 1.0\nseq1          ACC-G-GGTA\n"
-               "#=GR seq1 SS  1110101111\nseq2          TCC-G-GGCA\n"
-               "#=GR seq2 SS  0110101110\n//")
-        self.assertEqual(obs, exp)
-
-    def test_str_trees(self):
-        GF = OrderedDict({"NH": ["IMATREE", "IMATREETOO"],
-                          "TN": ["Tree2", "Tree1"]})
-        st = StockholmAlignment(self.seqs, gc=None, gf=GF, gs=None,
-                                gr=None)
-        obs = str(st)
-        exp = ("# STOCKHOLM 1.0\n#=GF TN Tree2\n#=GF NH IMATREE\n#=GF TN Tree1"
-               "\n#=GF NH IMATREETOO\nseq1          ACC-G-GGTA\n"
-               "seq2          TCC-G-GGCA\n//")
-
-        self.assertEqual(obs, exp)
-
+            DNA('TTT', metadata={'id': "d1"})])._validate_lengths())
 
 if __name__ == "__main__":
     main()
diff --git a/skbio/alignment/tests/test_pairwise.py b/skbio/alignment/tests/test_pairwise.py
index eed59da..75497e4 100644
--- a/skbio/alignment/tests/test_pairwise.py
+++ b/skbio/alignment/tests/test_pairwise.py
@@ -13,7 +13,7 @@ import warnings
 
 import numpy as np
 
-from skbio import Protein, DNA, BiologicalSequence, Alignment
+from skbio import Protein, DNA, Alignment
 from skbio.alignment import (
     global_pairwise_align_protein, local_pairwise_align_protein,
     global_pairwise_align_nucleotide, local_pairwise_align_nucleotide,
@@ -88,7 +88,8 @@ class PairwiseAlignmentTests(TestCase):
         # Protein (rather than str) as input
         expected = ("HEAGAWGHEE-", "---PAW-HEAE", 23.0)
         actual = global_pairwise_align_protein(
-            Protein("HEAGAWGHEE", "s1"), Protein("PAWHEAE", "s2"),
+            Protein("HEAGAWGHEE", metadata={'id': "s1"}),
+            Protein("PAWHEAE", metadata={'id': "s2"}),
             gap_open_penalty=10., gap_extend_penalty=5.)
         self.assertEqual(str(actual[0]), expected[0])
         self.assertEqual(str(actual[1]), expected[1])
@@ -99,8 +100,8 @@ class PairwiseAlignmentTests(TestCase):
         # One Alignment and one Protein as input
         expected = ("HEAGAWGHEE-", "---PAW-HEAE", 23.0)
         actual = global_pairwise_align_protein(
-            Alignment([Protein("HEAGAWGHEE", "s1")]),
-            Protein("PAWHEAE", "s2"),
+            Alignment([Protein("HEAGAWGHEE", metadata={'id': "s1"})]),
+            Protein("PAWHEAE", metadata={'id': "s2"}),
             gap_open_penalty=10., gap_extend_penalty=5.)
         self.assertEqual(str(actual[0]), expected[0])
         self.assertEqual(str(actual[1]), expected[1])
@@ -112,9 +113,9 @@ class PairwiseAlignmentTests(TestCase):
         # alignment as input. Score confirmed manually.
         expected = ("HEAGAWGHEE-", "HDAGAWGHDE-", "---PAW-HEAE", 21.0)
         actual = global_pairwise_align_protein(
-            Alignment([Protein("HEAGAWGHEE", "s1"),
-                       Protein("HDAGAWGHDE", "s2")]),
-            Alignment([Protein("PAWHEAE", "s3")]),
+            Alignment([Protein("HEAGAWGHEE", metadata={'id': "s1"}),
+                       Protein("HDAGAWGHDE", metadata={'id': "s2"})]),
+            Alignment([Protein("PAWHEAE", metadata={'id': "s3"})]),
             gap_open_penalty=10., gap_extend_penalty=5.)
         self.assertEqual(str(actual[0]), expected[0])
         self.assertEqual(str(actual[1]), expected[1])
@@ -125,7 +126,8 @@ class PairwiseAlignmentTests(TestCase):
 
         # ids are provided if they're not passed in
         actual = global_pairwise_align_protein(
-            Protein("HEAGAWGHEE"), Protein("PAWHEAE"),
+            Protein("HEAGAWGHEE"),
+            Protein("PAWHEAE"),
             gap_open_penalty=10., gap_extend_penalty=5.)
         self.assertEqual(actual.ids(), list('01'))
 
@@ -201,7 +203,8 @@ class PairwiseAlignmentTests(TestCase):
         expected = ("AWGHE", "AW-HE", 26.0, 4, 1)
         # Protein (rather than str) as input
         actual = local_pairwise_align_protein(
-            Protein("HEAGAWGHEE", "s1"), Protein("PAWHEAE", "s2"),
+            Protein("HEAGAWGHEE", metadata={'id': "s1"}),
+            Protein("PAWHEAE", metadata={'id': "s2"}),
             gap_open_penalty=10., gap_extend_penalty=5.)
         self.assertEqual(str(actual[0]), expected[0])
         self.assertEqual(str(actual[1]), expected[1])
@@ -211,17 +214,21 @@ class PairwiseAlignmentTests(TestCase):
 
         # Fails when either input is passed as an Alignment
         self.assertRaises(TypeError, local_pairwise_align_protein,
-                          Alignment([Protein("HEAGAWGHEE", "s1")]),
-                          Protein("PAWHEAE", "s2"), gap_open_penalty=10.,
+                          Alignment([Protein("HEAGAWGHEE",
+                                             metadata={'id': "s1"})]),
+                          Protein("PAWHEAE", metadata={'id': "s2"}),
+                          gap_open_penalty=10.,
                           gap_extend_penalty=5.)
         self.assertRaises(TypeError, local_pairwise_align_protein,
-                          Protein("HEAGAWGHEE", "s1"),
-                          Alignment([Protein("PAWHEAE", "s2")]),
+                          Protein("HEAGAWGHEE", metadata={'id': "s1"}),
+                          Alignment([Protein("PAWHEAE",
+                                             metadata={'id': "s2"})]),
                           gap_open_penalty=10., gap_extend_penalty=5.)
 
         # ids are provided if they're not passed in
         actual = local_pairwise_align_protein(
-            Protein("HEAGAWGHEE"), Protein("PAWHEAE"),
+            Protein("HEAGAWGHEE"),
+            Protein("PAWHEAE"),
             gap_open_penalty=10., gap_extend_penalty=5.)
         self.assertEqual(actual.ids(), list('01'))
 
@@ -255,7 +262,8 @@ class PairwiseAlignmentTests(TestCase):
         # DNA (rather than str) as input
         expected = ("-GACCTTGACCAGGTACC", "GAACTTTGAC---GTAAC", 32.0, 0, 0)
         actual = global_pairwise_align_nucleotide(
-            DNA("GACCTTGACCAGGTACC", "s1"), DNA("GAACTTTGACGTAAC", "s2"),
+            DNA("GACCTTGACCAGGTACC", metadata={'id': "s1"}),
+            DNA("GAACTTTGACGTAAC", metadata={'id': "s2"}),
             gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,
             mismatch_score=-4)
         self.assertEqual(str(actual[0]), expected[0])
@@ -268,9 +276,9 @@ class PairwiseAlignmentTests(TestCase):
         expected = ("-GACCTTGACCAGGTACC", "-GACCATGACCAGGTACC",
                     "GAACTTTGAC---GTAAC", 27.5, 0, 0)
         actual = global_pairwise_align_nucleotide(
-            Alignment([DNA("GACCTTGACCAGGTACC", "s1"),
-                       DNA("GACCATGACCAGGTACC", "s2")]),
-            DNA("GAACTTTGACGTAAC", "s3"),
+            Alignment([DNA("GACCTTGACCAGGTACC", metadata={'id': "s1"}),
+                       DNA("GACCATGACCAGGTACC", metadata={'id': "s2"})]),
+            DNA("GAACTTTGACGTAAC", metadata={'id': "s3"}),
             gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,
             mismatch_score=-4)
         self.assertEqual(str(actual[0]), expected[0])
@@ -282,7 +290,8 @@ class PairwiseAlignmentTests(TestCase):
 
         # ids are provided if they're not passed in
         actual = global_pairwise_align_nucleotide(
-            DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
+            DNA("GACCTTGACCAGGTACC"),
+            DNA("GAACTTTGACGTAAC"),
             gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,
             mismatch_score=-4)
         self.assertEqual(actual.ids(), list('01'))
@@ -317,7 +326,8 @@ class PairwiseAlignmentTests(TestCase):
         # DNA (rather than str) as input
         expected = ("ACCTTGAC", "ACTTTGAC", 31.0, 1, 2)
         actual = local_pairwise_align_nucleotide(
-            DNA("GACCTTGACCAGGTACC", "s1"), DNA("GAACTTTGACGTAAC", "s2"),
+            DNA("GACCTTGACCAGGTACC", metadata={'id': "s1"}),
+            DNA("GAACTTTGACGTAAC", metadata={'id': "s2"}),
             gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
             mismatch_score=-4)
         self.assertEqual(str(actual[0]), expected[0])
@@ -328,19 +338,22 @@ class PairwiseAlignmentTests(TestCase):
 
         # Fails when either input is passed as an Alignment
         self.assertRaises(TypeError, local_pairwise_align_nucleotide,
-                          Alignment([DNA("GACCTTGACCAGGTACC", "s1")]),
-                          DNA("GAACTTTGACGTAAC", "s2"),
+                          Alignment([DNA("GACCTTGACCAGGTACC",
+                                         metadata={'id': "s1"})]),
+                          DNA("GAACTTTGACGTAAC", metadata={'id': "s2"}),
                           gap_open_penalty=10., gap_extend_penalty=5.,
                           match_score=5, mismatch_score=-4)
         self.assertRaises(TypeError, local_pairwise_align_nucleotide,
-                          DNA("GACCTTGACCAGGTACC", "s1"),
-                          Alignment([DNA("GAACTTTGACGTAAC", "s2")]),
+                          DNA("GACCTTGACCAGGTACC", metadata={'id': "s1"}),
+                          Alignment([DNA("GAACTTTGACGTAAC",
+                                         metadata={'id': "s2"})]),
                           gap_open_penalty=10., gap_extend_penalty=5.,
                           match_score=5, mismatch_score=-4)
 
         # ids are provided if they're not passed in
         actual = local_pairwise_align_nucleotide(
-            DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
+            DNA("GACCTTGACCAGGTACC"),
+            DNA("GAACTTTGACGTAAC"),
             gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
             mismatch_score=-4)
         self.assertEqual(actual.ids(), list('01'))
@@ -389,7 +402,8 @@ class PairwiseAlignmentTests(TestCase):
                             [0, -1, -1, -1],
                             [0, -1, -1, -1]]
         actual_score_m, actual_tback_m = _init_matrices_sw(
-            Alignment([DNA('AAA')]), Alignment([DNA('AAAA')]), 5, 2)
+            Alignment([DNA('AAA', metadata={'id': 'id'})]),
+            Alignment([DNA('AAAA', metadata={'id': 'id'})]), 5, 2)
         np.testing.assert_array_equal(actual_score_m, expected_score_m)
         np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
 
@@ -405,7 +419,8 @@ class PairwiseAlignmentTests(TestCase):
                             [2, -1, -1, -1],
                             [2, -1, -1, -1]]
         actual_score_m, actual_tback_m = _init_matrices_nw(
-            Alignment([DNA('AAA')]), Alignment([DNA('AAAA')]), 5, 2)
+            Alignment([DNA('AAA', metadata={'id': 'id'})]),
+            Alignment([DNA('AAAA', metadata={'id': 'id'})]), 5, 2)
         np.testing.assert_array_equal(actual_score_m, expected_score_m)
         np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
 
@@ -447,8 +462,8 @@ class PairwiseAlignmentTests(TestCase):
                             [2, 2, 2, 2]]
         m = make_identity_substitution_matrix(2, -1)
         actual_score_m, actual_tback_m = _compute_score_and_traceback_matrices(
-            Alignment([DNA('ACG')]),
-            Alignment([DNA('ACGT')]), 5, 2, m)
+            Alignment([DNA('ACG', metadata={'id': 'id'})]),
+            Alignment([DNA('ACGT', metadata={'id': 'id'})]), 5, 2, m)
         np.testing.assert_array_equal(actual_score_m, expected_score_m)
         np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
 
@@ -466,8 +481,8 @@ class PairwiseAlignmentTests(TestCase):
                             [2, 2, 2, 1]]
         m = make_identity_substitution_matrix(2, -1)
         actual_score_m, actual_tback_m = _compute_score_and_traceback_matrices(
-            Alignment([DNA('ACC')]),
-            Alignment([DNA('ACGT')]), 5, 2, m)
+            Alignment([DNA('ACC', metadata={'id': 'id'})]),
+            Alignment([DNA('ACGT', metadata={'id': 'id'})]), 5, 2, m)
         np.testing.assert_array_equal(actual_score_m, expected_score_m)
         np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
 
@@ -485,8 +500,10 @@ class PairwiseAlignmentTests(TestCase):
                             [2, 2, 2, 1]]
         m = make_identity_substitution_matrix(2, -1)
         actual_score_m, actual_tback_m = _compute_score_and_traceback_matrices(
-            Alignment([DNA('ACC', 's1'), DNA('ACC', 's2')]),
-            Alignment([DNA('ACGT', 's3'), DNA('ACGT', 's4')]), 5, 2, m)
+            Alignment([DNA('ACC', metadata={'id': 's1'}),
+                       DNA('ACC', metadata={'id': 's2'})]),
+            Alignment([DNA('ACGT', metadata={'id': 's3'}),
+                       DNA('ACGT', metadata={'id': 's4'})]), 5, 2, m)
         np.testing.assert_array_equal(actual_score_m, expected_score_m)
         np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
 
@@ -495,8 +512,9 @@ class PairwiseAlignmentTests(TestCase):
         # substitution matrix, an informative error should be raised
         m = make_identity_substitution_matrix(2, -1)
         self.assertRaises(ValueError, _compute_score_and_traceback_matrices,
-                          Alignment([DNA('AWG')]),
-                          Alignment([DNA('ACGT')]), 5, 2, m)
+                          Alignment([DNA('AWG', metadata={'id': 'id'})]),
+                          Alignment([DNA('ACGT', metadata={'id': 'id'})]),
+                          5, 2, m)
 
     def test_traceback(self):
         score_m = [[0, -5, -7, -9],
@@ -512,10 +530,12 @@ class PairwiseAlignmentTests(TestCase):
                    [2, 2, 2, 2]]
         tback_m = np.array(tback_m)
         # start at bottom-right
-        expected = ([BiologicalSequence("ACG-")],
-                    [BiologicalSequence("ACGT")], 1, 0, 0)
-        actual = _traceback(tback_m, score_m, Alignment([DNA('ACG')]),
-                            Alignment([DNA('ACGT')]), 4, 3)
+        expected = ([DNA("ACG-", metadata={'id': '0'})],
+                    [DNA("ACGT", metadata={'id': '1'})], 1, 0, 0)
+        actual = _traceback(tback_m, score_m,
+                            Alignment([DNA('ACG', metadata={'id': ''})]),
+                            Alignment([DNA('ACGT', metadata={'id': ''})]),
+                            4, 3)
         self.assertEqual(actual, expected)
 
         # four sequences in two alignments
@@ -532,20 +552,26 @@ class PairwiseAlignmentTests(TestCase):
                    [2, 2, 2, 2]]
         tback_m = np.array(tback_m)
         # start at bottom-right
-        expected = ([BiologicalSequence("ACG-"), BiologicalSequence("ACG-")],
-                    [BiologicalSequence("ACGT"), BiologicalSequence("ACGT")],
+        expected = ([DNA("ACG-", metadata={'id': 's1'}),
+                     DNA("ACG-", metadata={'id': 's2'})],
+                    [DNA("ACGT", metadata={'id': 's3'}),
+                     DNA("ACGT", metadata={'id': 's4'})],
                     1, 0, 0)
         actual = _traceback(tback_m, score_m,
-                            Alignment([DNA('ACG', 's1'), DNA('ACG', 's2')]),
-                            Alignment([DNA('ACGT', 's3'), DNA('ACGT', 's4')]),
+                            Alignment([DNA('ACG', metadata={'id': 's1'}),
+                                       DNA('ACG', metadata={'id': 's2'})]),
+                            Alignment([DNA('ACGT', metadata={'id': 's3'}),
+                                       DNA('ACGT', metadata={'id': 's4'})]),
                             4, 3)
         self.assertEqual(actual, expected)
 
         # start at highest-score
-        expected = ([BiologicalSequence("ACG")],
-                    [BiologicalSequence("ACG")], 6, 0, 0)
-        actual = _traceback(tback_m, score_m, Alignment([DNA('ACG')]),
-                            Alignment([DNA('ACGT')]), 3, 3)
+        expected = ([DNA("ACG", metadata={'id': '0'})],
+                    [DNA("ACG", metadata={'id': '1'})], 6, 0, 0)
+        actual = _traceback(tback_m, score_m,
+                            Alignment([DNA('ACG', metadata={'id': ''})]),
+                            Alignment([DNA('ACGT', metadata={'id': ''})]),
+                            3, 3)
         self.assertEqual(actual, expected)
 
         # terminate traceback before top-right
@@ -556,16 +582,22 @@ class PairwiseAlignmentTests(TestCase):
                    [2, 2, 2, 2]]
         tback_m = np.array(tback_m)
         expected = ("G", "G", 6, 2, 2)
-        expected = ([BiologicalSequence("G")],
-                    [BiologicalSequence("G")], 6, 2, 2)
-        actual = _traceback(tback_m, score_m, Alignment([DNA('ACG')]),
-                            Alignment([DNA('ACGT')]), 3, 3)
+        expected = ([DNA("G", metadata={'id': '0'})],
+                    [DNA("G", metadata={'id': '1'})], 6, 2, 2)
+        actual = _traceback(tback_m, score_m,
+                            Alignment([DNA('ACG', metadata={'id': ''})]),
+                            Alignment([DNA('ACGT', metadata={'id': ''})]),
+                            3, 3)
         self.assertEqual(actual, expected)
 
     def test_get_seq_id(self):
-        self.assertEqual(_get_seq_id("AAA", "hello"), "hello")
         self.assertEqual(_get_seq_id(DNA("AAA"), "hello"), "hello")
-        self.assertEqual(_get_seq_id(DNA("AAA", "s1"), "hello"), "s1")
+        self.assertEqual(_get_seq_id(DNA("AAA", metadata={'id': "s1"}),
+                                     "hello"), "s1")
+        self.assertEqual(_get_seq_id(DNA("AAA", metadata={'id': None}),
+                                     "hello"), "hello")
+        self.assertEqual(_get_seq_id(DNA("AAA", metadata={'id': '\t'}),
+                                     "hello"), "hello")
 
     def test_first_largest(self):
         l = [(5, 'a'), (5, 'b'), (5, 'c')]
diff --git a/skbio/alignment/tests/test_ssw.py b/skbio/alignment/tests/test_ssw.py
index 2f6a66b..34e37da 100644
--- a/skbio/alignment/tests/test_ssw.py
+++ b/skbio/alignment/tests/test_ssw.py
@@ -1,10 +1,10 @@
-# -----------------------------------------------------------------------------
-#  Copyright (c) 2013--, scikit-bio development team.
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
 #
-#  Distributed under the terms of the Modified BSD License.
+# Distributed under the terms of the Modified BSD License.
 #
-#  The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
 
 # Special thanks to http://www.faculty.ucr.edu/~mmaduro/random.htm for the
 # random DNA generator.
@@ -16,9 +16,11 @@
 # the resulting alignments are verified by hand. Creating tests from the base
 # C API is impractical at this time.
 
+from __future__ import absolute_import, division, print_function
+
 from unittest import TestCase, main
 
-from skbio import local_pairwise_align_ssw
+from skbio import local_pairwise_align_ssw, Sequence, DNA
 from skbio.alignment import StripedSmithWaterman, AlignmentStructure
 from skbio.alignment._pairwise import blosum50
 
@@ -593,6 +595,17 @@ class TestAlignStripedSmithWaterman(TestSSW):
                                           target_sequence, **kwargs)
         self._check_Alignment_to_AlignmentStructure(align2, align1)
 
+    def test_constructor(self):
+        query_sequence = 'AGGGTAATTAGGCGTGTTCACCTA'
+        target_sequence = 'TACTTATAAGATGTCTCAACGGCATGCGCAACTTGTGAAGTG'
+
+        align1 = local_pairwise_align_ssw(query_sequence, target_sequence)
+        align2 = local_pairwise_align_ssw(query_sequence, target_sequence,
+                                          constructor=DNA)
+
+        self.assertEqual(type(align1[0]), Sequence)
+        self.assertEqual(type(align2[0]), DNA)
+
 
 class TestAlignmentStructure(TestSSW):
 
diff --git a/skbio/diversity/__init__.py b/skbio/diversity/__init__.py
index 5015b62..24d4937 100644
--- a/skbio/diversity/__init__.py
+++ b/skbio/diversity/__init__.py
@@ -26,5 +26,7 @@ Subpackages
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from numpy.testing import Tester
-test = Tester().test
+from __future__ import absolute_import, division, print_function
+
+from skbio.util import TestRunner
+test = TestRunner(__file__).test
diff --git a/skbio/diversity/alpha/__init__.py b/skbio/diversity/alpha/__init__.py
index b63122f..a196652 100644
--- a/skbio/diversity/alpha/__init__.py
+++ b/skbio/diversity/alpha/__init__.py
@@ -138,7 +138,9 @@ Let's see how many singletons and doubletons there are in the sample:
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from numpy.testing import Tester
+from __future__ import absolute_import, division, print_function
+
+from skbio.util import TestRunner
 
 from ._ace import ace
 from ._chao1 import chao1, chao1_ci
@@ -158,4 +160,4 @@ __all__ = ['ace', 'chao1', 'chao1_ci', 'berger_parker_d', 'brillouin_d',
            'shannon', 'simpson', 'simpson_e', 'singles', 'strong',
            'gini_index', 'lladser_pe', 'lladser_ci']
 
-test = Tester().test
+test = TestRunner(__file__).test
diff --git a/skbio/diversity/alpha/_ace.py b/skbio/diversity/alpha/_ace.py
index 5da3ad3..e50cfea 100644
--- a/skbio/diversity/alpha/_ace.py
+++ b/skbio/diversity/alpha/_ace.py
@@ -1,6 +1,3 @@
-#!/usr/bin/env python
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -9,11 +6,15 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 import numpy as np
 
 from ._base import _validate
+from skbio.util._decorator import experimental
 
 
+ at experimental(as_of="0.4.0")
 def ace(counts, rare_threshold=10):
     """Calculate the ACE metric (Abundance-based Coverage Estimator).
 
diff --git a/skbio/diversity/alpha/_base.py b/skbio/diversity/alpha/_base.py
index 127d95b..afa288d 100644
--- a/skbio/diversity/alpha/_base.py
+++ b/skbio/diversity/alpha/_base.py
@@ -13,6 +13,7 @@ from scipy.special import gammaln
 from scipy.optimize import fmin_powell, minimize_scalar
 
 from skbio.stats import subsample_counts
+from skbio.util._decorator import experimental
 
 
 def _validate(counts, suppress_cast=False):
@@ -34,6 +35,7 @@ def _validate(counts, suppress_cast=False):
     return counts
 
 
+ at experimental(as_of="0.4.0")
 def berger_parker_d(counts):
     """Calculate Berger-Parker dominance.
 
@@ -69,6 +71,7 @@ def berger_parker_d(counts):
     return counts.max() / counts.sum()
 
 
+ at experimental(as_of="0.4.0")
 def brillouin_d(counts):
     """Calculate Brillouin index of alpha diversity, which is defined as:
 
@@ -103,6 +106,7 @@ def brillouin_d(counts):
     return (gammaln(n + 1) - gammaln(nz + 1).sum()) / n
 
 
+ at experimental(as_of="0.4.0")
 def dominance(counts):
     """Calculate dominance.
 
@@ -146,6 +150,7 @@ def dominance(counts):
     return (freqs * freqs).sum()
 
 
+ at experimental(as_of="0.4.0")
 def doubles(counts):
     """Calculate number of double occurrences (doubletons).
 
@@ -164,6 +169,7 @@ def doubles(counts):
     return (counts == 2).sum()
 
 
+ at experimental(as_of="0.4.0")
 def enspie(counts):
     """Calculate ENS_pie alpha diversity measure.
 
@@ -198,6 +204,7 @@ def enspie(counts):
     return 1 / dominance(counts)
 
 
+ at experimental(as_of="0.4.0")
 def equitability(counts, base=2):
     """Calculate equitability (Shannon index corrected for number of OTUs).
 
@@ -233,6 +240,7 @@ def equitability(counts, base=2):
     return numerator / denominator
 
 
+ at experimental(as_of="0.4.0")
 def esty_ci(counts):
     """Calculate Esty's CI.
 
@@ -287,6 +295,7 @@ def esty_ci(counts):
     return f1 / n - z * np.sqrt(W), f1 / n + z * np.sqrt(W)
 
 
+ at experimental(as_of="0.4.0")
 def fisher_alpha(counts):
     """Calculate Fisher's alpha.
 
@@ -338,6 +347,7 @@ def fisher_alpha(counts):
     return alpha
 
 
+ at experimental(as_of="0.4.0")
 def goods_coverage(counts):
     """Calculate Good's coverage of counts.
 
@@ -367,6 +377,7 @@ def goods_coverage(counts):
     return 1 - (f1 / N)
 
 
+ at experimental(as_of="0.4.0")
 def heip_e(counts):
     """Calculate Heip's evenness measure.
 
@@ -395,6 +406,7 @@ def heip_e(counts):
             (observed_otus(counts) - 1))
 
 
+ at experimental(as_of="0.4.0")
 def kempton_taylor_q(counts, lower_quantile=0.25, upper_quantile=0.75):
     """Calculate Kempton-Taylor Q index of alpha diversity.
 
@@ -445,6 +457,7 @@ def kempton_taylor_q(counts, lower_quantile=0.25, upper_quantile=0.75):
                                     sorted_counts[lower])
 
 
+ at experimental(as_of="0.4.0")
 def margalef(counts):
     """Calculate Margalef's richness index, which is defined as:
 
@@ -482,6 +495,7 @@ def margalef(counts):
     return (observed_otus(counts) - 1) / np.log(counts.sum())
 
 
+ at experimental(as_of="0.4.0")
 def mcintosh_d(counts):
     """Calculate McIntosh dominance index D, which is defined as:
 
@@ -531,6 +545,7 @@ def mcintosh_d(counts):
     return (n - u) / (n - np.sqrt(n))
 
 
+ at experimental(as_of="0.4.0")
 def mcintosh_e(counts):
     """Calculate McIntosh's evenness measure E.
 
@@ -566,6 +581,7 @@ def mcintosh_e(counts):
     return numerator / denominator
 
 
+ at experimental(as_of="0.4.0")
 def menhinick(counts):
     """Calculate Menhinick's richness index.
 
@@ -595,6 +611,7 @@ def menhinick(counts):
     return observed_otus(counts) / np.sqrt(counts.sum())
 
 
+ at experimental(as_of="0.4.0")
 def michaelis_menten_fit(counts, num_repeats=1, params_guess=None):
     """Calculate Michaelis-Menten fit to rarefaction curve of observed OTUs.
 
@@ -673,6 +690,7 @@ def michaelis_menten_fit(counts, num_repeats=1, params_guess=None):
                        disp=False)[0]
 
 
+ at experimental(as_of="0.4.0")
 def observed_otus(counts):
     """Calculate the number of distinct OTUs.
 
@@ -691,6 +709,7 @@ def observed_otus(counts):
     return (counts != 0).sum()
 
 
+ at experimental(as_of="0.4.0")
 def osd(counts):
     """Calculate observed OTUs, singles, and doubles.
 
@@ -720,6 +739,7 @@ def osd(counts):
     return observed_otus(counts), singles(counts), doubles(counts)
 
 
+ at experimental(as_of="0.4.0")
 def robbins(counts):
     """Calculate Robbins' estimator for the probability of unobserved outcomes.
 
@@ -755,6 +775,7 @@ def robbins(counts):
     return singles(counts) / counts.sum()
 
 
+ at experimental(as_of="0.4.0")
 def shannon(counts, base=2):
     """Calculate Shannon entropy of counts (H), default in bits.
 
@@ -787,6 +808,7 @@ def shannon(counts, base=2):
     return -(nonzero_freqs * np.log(nonzero_freqs)).sum() / np.log(base)
 
 
+ at experimental(as_of="0.4.0")
 def simpson(counts):
     """Calculate Simpson's index.
 
@@ -821,6 +843,7 @@ def simpson(counts):
     return 1 - dominance(counts)
 
 
+ at experimental(as_of="0.4.0")
 def simpson_e(counts):
     """Calculate Simpson's evenness measure E.
 
@@ -862,6 +885,7 @@ def simpson_e(counts):
     return enspie(counts) / observed_otus(counts)
 
 
+ at experimental(as_of="0.4.0")
 def singles(counts):
     """Calculate number of single occurrences (singletons).
 
@@ -880,6 +904,7 @@ def singles(counts):
     return (counts == 1).sum()
 
 
+ at experimental(as_of="0.4.0")
 def strong(counts):
     """Calculate Strong's dominance index (Dw).
 
diff --git a/skbio/diversity/alpha/_chao1.py b/skbio/diversity/alpha/_chao1.py
index 7e883bf..4c3c93d 100644
--- a/skbio/diversity/alpha/_chao1.py
+++ b/skbio/diversity/alpha/_chao1.py
@@ -1,6 +1,3 @@
-#!/usr/bin/env python
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -9,11 +6,15 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 import numpy as np
 
 from ._base import _validate, osd
+from skbio.util._decorator import experimental
 
 
+ at experimental(as_of="0.4.0")
 def chao1(counts, bias_corrected=True):
     """Calculate chao1 richness estimator.
 
@@ -71,6 +72,7 @@ def chao1(counts, bias_corrected=True):
         return o + s * (s - 1) / (2 * (d + 1))
 
 
+ at experimental(as_of="0.4.0")
 def chao1_ci(counts, bias_corrected=True, zscore=1.96):
     """Calculate chao1 confidence interval.
 
diff --git a/skbio/diversity/alpha/_gini.py b/skbio/diversity/alpha/_gini.py
index bdf74f1..160e253 100644
--- a/skbio/diversity/alpha/_gini.py
+++ b/skbio/diversity/alpha/_gini.py
@@ -1,6 +1,3 @@
-#!/usr/bin/env python
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -9,11 +6,15 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 import numpy as np
 
 from ._base import _validate
+from skbio.util._decorator import experimental
 
 
+ at experimental(as_of="0.4.0")
 def gini_index(data, method='rectangles'):
     """Calculate the Gini index.
 
diff --git a/skbio/diversity/alpha/_lladser.py b/skbio/diversity/alpha/_lladser.py
index 79b6df6..61d6c02 100644
--- a/skbio/diversity/alpha/_lladser.py
+++ b/skbio/diversity/alpha/_lladser.py
@@ -1,6 +1,3 @@
-#!/usr/bin/env python
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -9,11 +6,15 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 import numpy as np
 
 from ._base import _validate
+from skbio.util._decorator import experimental
 
 
+ at experimental(as_of="0.4.0")
 def lladser_pe(counts, r=10):
     """Calculate single point estimate of conditional uncovered probability.
 
@@ -38,7 +39,8 @@ def lladser_pe(counts, r=10):
     -----
     This function is just a wrapper around the full point estimator described
     in Theorem 2 (i) in [1]_, intended to be called for a single best estimate
-    on a complete sample.
+    on a complete sample. This function is not guaranteed to return estimated
+    uncovered probabilities less than 1 if the coverage is too low.
 
     References
     ----------
@@ -59,6 +61,7 @@ def lladser_pe(counts, r=10):
     return pe
 
 
+ at experimental(as_of="0.4.0")
 def lladser_ci(counts, r, alpha=0.95, f=10, ci_type='ULCL'):
     """Calculate single CI of the conditional uncovered probability.
 
diff --git a/skbio/diversity/alpha/tests/__init__.py b/skbio/diversity/alpha/tests/__init__.py
index c99682c..3fe3dc6 100644
--- a/skbio/diversity/alpha/tests/__init__.py
+++ b/skbio/diversity/alpha/tests/__init__.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -7,3 +5,5 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
diff --git a/skbio/diversity/alpha/tests/test_ace.py b/skbio/diversity/alpha/tests/test_ace.py
index 65de9e5..6201cea 100644
--- a/skbio/diversity/alpha/tests/test_ace.py
+++ b/skbio/diversity/alpha/tests/test_ace.py
@@ -1,6 +1,3 @@
-#!/usr/bin/env python
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -9,6 +6,8 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 import numpy as np
 from nose.tools import assert_almost_equal, assert_raises
 
diff --git a/skbio/diversity/alpha/tests/test_base.py b/skbio/diversity/alpha/tests/test_base.py
index d418b02..a5a3593 100644
--- a/skbio/diversity/alpha/tests/test_base.py
+++ b/skbio/diversity/alpha/tests/test_base.py
@@ -1,6 +1,3 @@
-#!/usr/bin/env python
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -9,6 +6,8 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 from unittest import TestCase, main
 
 import numpy as np
diff --git a/skbio/diversity/alpha/tests/test_chao1.py b/skbio/diversity/alpha/tests/test_chao1.py
index 45a1fa5..c0f8ce7 100644
--- a/skbio/diversity/alpha/tests/test_chao1.py
+++ b/skbio/diversity/alpha/tests/test_chao1.py
@@ -1,6 +1,3 @@
-#!/usr/bin/env python
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -9,6 +6,8 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 from unittest import TestCase, main
 
 import numpy as np
diff --git a/skbio/diversity/alpha/tests/test_gini.py b/skbio/diversity/alpha/tests/test_gini.py
index e04c99a..2fa0491 100644
--- a/skbio/diversity/alpha/tests/test_gini.py
+++ b/skbio/diversity/alpha/tests/test_gini.py
@@ -1,6 +1,3 @@
-#!/usr/bin/env python
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -9,6 +6,8 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 from unittest import TestCase, main
 
 import numpy as np
diff --git a/skbio/diversity/beta/__init__.py b/skbio/diversity/beta/__init__.py
index 946f8cd..610c705 100644
--- a/skbio/diversity/beta/__init__.py
+++ b/skbio/diversity/beta/__init__.py
@@ -97,11 +97,6 @@ Create a table containing 7 OTUs and 6 samples:
    let's define some:
 
    >>> import pandas as pd
-   >>> try:
-   ...     # not necessary for normal use
-   ...     pd.set_option('show_dimensions', True)
-   ... except KeyError:
-   ...     pass
    >>> sample_md = {
    ...    'A': {'body_site': 'gut', 'subject': 's1'},
    ...    'B': {'body_site': 'skin', 'subject': 's1'},
@@ -118,8 +113,6 @@ Create a table containing 7 OTUs and 6 samples:
    D      s2       gut
    E      s2    tongue
    F      s2      skin
-   <BLANKLINE>
-   [6 rows x 2 columns]
 
    Now let's plot our PCoA results, coloring each sample by the subject it
    was taken from:
@@ -185,10 +178,12 @@ References
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from numpy.testing import Tester
+from __future__ import absolute_import, division, print_function
+
+from skbio.util import TestRunner
 
 from ._base import pw_distances, pw_distances_from_table
 
 __all__ = ["pw_distances", "pw_distances_from_table"]
 
-test = Tester().test
+test = TestRunner(__file__).test
diff --git a/skbio/diversity/beta/_base.py b/skbio/diversity/beta/_base.py
index 0b63418..5922503 100644
--- a/skbio/diversity/beta/_base.py
+++ b/skbio/diversity/beta/_base.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -8,14 +6,16 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from warnings import warn
+from __future__ import absolute_import, division, print_function
 
 import numpy as np
 from scipy.spatial.distance import pdist, squareform
 
 from skbio.stats.distance import DistanceMatrix
+from skbio.util._decorator import experimental, deprecated
 
 
+ at experimental(as_of="0.4.0")
 def pw_distances(counts, ids=None, metric="braycurtis"):
     """Compute distances between all pairs of columns in a counts matrix
 
@@ -58,7 +58,14 @@ def pw_distances(counts, ids=None, metric="braycurtis"):
     return DistanceMatrix(
         squareform(distances, force='tomatrix', checks=False), ids)
 
+pw_distances_from_table_deprecation_reason = (
+    "In the future, pw_distance will take a biom.table.Table object "
+    "and this function will be removed. You will need to update your "
+    "code to call pw_distances at that time.")
+
 
+ at deprecated(as_of="0.4.0", until="0.4.1",
+            reason=pw_distances_from_table_deprecation_reason)
 def pw_distances_from_table(table, metric="braycurtis"):
     """Compute distances between all pairs of samples in table
 
@@ -85,10 +92,6 @@ def pw_distances_from_table(table, metric="braycurtis"):
     pw_distances
 
     """
-    warn("pw_distances_from_table is deprecated. In the future (tentatively "
-         "scikit-bio 0.2.0), pw_distance will take a biom.table.Table object "
-         "and this function will be removed. You will need to update your "
-         "code to call pw_distances at that time.", DeprecationWarning)
     sample_ids = table.ids(axis="sample")
     num_samples = len(sample_ids)
 
diff --git a/skbio/diversity/beta/tests/__init__.py b/skbio/diversity/beta/tests/__init__.py
index 0bf0c55..3fe3dc6 100644
--- a/skbio/diversity/beta/tests/__init__.py
+++ b/skbio/diversity/beta/tests/__init__.py
@@ -5,3 +5,5 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
diff --git a/skbio/diversity/beta/tests/test_base.py b/skbio/diversity/beta/tests/test_base.py
index 078cb5a..f2f9cd7 100644
--- a/skbio/diversity/beta/tests/test_base.py
+++ b/skbio/diversity/beta/tests/test_base.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -8,6 +6,8 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 from unittest import TestCase, main
 
 import numpy as np
diff --git a/skbio/draw/__init__.py b/skbio/draw/__init__.py
index d3be208..a2469b5 100644
--- a/skbio/draw/__init__.py
+++ b/skbio/draw/__init__.py
@@ -28,10 +28,12 @@ Functions
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from numpy.testing import Tester
+from __future__ import absolute_import, division, print_function
+
+from skbio.util import TestRunner
 
 from ._distributions import boxplots, grouped_distributions
 
 __all__ = ['boxplots', 'grouped_distributions']
 
-test = Tester().test
+test = TestRunner(__file__).test
diff --git a/skbio/draw/_distributions.py b/skbio/draw/_distributions.py
index 2e664ec..da9d3df 100644
--- a/skbio/draw/_distributions.py
+++ b/skbio/draw/_distributions.py
@@ -8,7 +8,6 @@
 
 from __future__ import absolute_import, division, print_function
 from future.builtins import map, range, zip
-from six import string_types
 
 from itertools import cycle
 import warnings
@@ -17,8 +16,18 @@ import numpy as np
 import matplotlib.pyplot as plt
 from matplotlib.lines import Line2D
 from matplotlib.patches import Polygon, Rectangle
+import six
 
+from skbio.util._decorator import deprecated
 
+distribution_plot_deprecation_p = {
+    'as_of': '0.4.0', 'until': '0.4.1', 'reason': (
+        "Plots that are not specific to bioinformatics should be generated "
+        "with seaborn or another general-purpose plotting package."
+    )}
+
+
+ at deprecated(**distribution_plot_deprecation_p)
 def boxplots(distributions, x_values=None, x_tick_labels=None, title=None,
              x_label=None, y_label=None, x_tick_labels_orientation='vertical',
              y_min=None, y_max=None, whisker_length=1.5, box_width=0.5,
@@ -140,6 +149,7 @@ def boxplots(distributions, x_values=None, x_tick_labels=None, title=None,
     return fig
 
 
+ at deprecated(**distribution_plot_deprecation_p)
 def grouped_distributions(plot_type, data, x_values=None,
                           data_point_labels=None, distribution_labels=None,
                           distribution_markers=None, x_label=None,
@@ -355,7 +365,7 @@ def _validate_input(data, x_values, data_point_labels, distribution_labels):
     Validates plotting options to make sure they are valid with the supplied
     data.
     """
-    if data is None or not data or isinstance(data, string_types):
+    if data is None or not data or isinstance(data, six.string_types):
         raise ValueError("The data must be a list type, and it cannot be "
                          "None or empty.")
 
@@ -552,7 +562,7 @@ def _is_single_matplotlib_color(color):
     """Returns True if color is a single (not a list) mpl color."""
     single_color = False
 
-    if (isinstance(color, str)):
+    if (isinstance(color, six.string_types)):
         single_color = True
     elif len(color) == 3 or len(color) == 4:
         single_color = True
diff --git a/skbio/draw/tests/__init__.py b/skbio/draw/tests/__init__.py
index c99682c..3fe3dc6 100644
--- a/skbio/draw/tests/__init__.py
+++ b/skbio/draw/tests/__init__.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -7,3 +5,5 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
diff --git a/skbio/draw/tests/test_distributions.py b/skbio/draw/tests/test_distributions.py
index 7cccbb5..2ffeda6 100644
--- a/skbio/draw/tests/test_distributions.py
+++ b/skbio/draw/tests/test_distributions.py
@@ -8,6 +8,7 @@
 
 from __future__ import absolute_import, division, print_function
 
+import warnings
 from unittest import TestCase, main
 
 import numpy as np
@@ -130,14 +131,21 @@ class DistributionsTests(TestCase):
                          ['b', 'g', 'r', 'c'])
 
     def test_get_distribution_markers_insufficient_markers(self):
-        self.assertEqual(npt.assert_warns(RuntimeWarning,
-                                          _get_distribution_markers,
-                                          'colors', None, 10),
-                         ['b', 'g', 'r', 'c', 'm', 'y', 'w', 'b', 'g', 'r'])
-        self.assertEqual(npt.assert_warns(RuntimeWarning,
-                                          _get_distribution_markers,
-                                          'symbols', ['^', '>', '<'], 5),
-                         ['^', '>', '<', '^', '>'])
+
+        expected = ['b', 'g', 'r', 'c', 'm', 'y', 'w', 'b', 'g', 'r']
+        # adapted from SO example here: http://stackoverflow.com/a/3892301
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter("always")
+            actual = _get_distribution_markers('colors', None, 10)
+            self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
+            self.assertEqual(actual, expected)
+
+        expected = ['^', '>', '<', '^', '>']
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter("always")
+            actual = _get_distribution_markers('symbols', ['^', '>', '<'], 5)
+            self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
+            self.assertEqual(actual, expected)
 
     def test_get_distribution_markers_bad_marker_type(self):
         with npt.assert_raises(ValueError):
@@ -328,9 +336,11 @@ class DistributionsTests(TestCase):
                 ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"],
                 ['b', 'r'], "x-axis label", "y-axis label", "Test")
 
-        npt.assert_warns(RuntimeWarning,
-                         grouped_distributions,
-                         *args)
+        # adapted from SO example here: http://stackoverflow.com/a/3892301
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter("always")
+            grouped_distributions(*args)
+            self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
 
     def test_grouped_distributions_scatter(self):
         fig = grouped_distributions('scatter', self.ValidTypicalData,
@@ -351,7 +361,11 @@ class DistributionsTests(TestCase):
                 ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"],
                 ['^'], "x-axis label", "y-axis label", "Test")
 
-        npt.assert_warns(RuntimeWarning, grouped_distributions, *args)
+        # adapted from SO example here: http://stackoverflow.com/a/3892301
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter("always")
+            grouped_distributions(*args)
+            self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
 
     def test_grouped_distributions_empty_marker_list(self):
         grouped_distributions('scatter', self.ValidTypicalData,
@@ -568,7 +582,12 @@ class DistributionsTests(TestCase):
                                          'oooooooooooooooooooooooooooooooo'
                                          'oooo', 'barbarbar'],
                           x_tick_labels_orientation='vertical')
-        npt.assert_warns(RuntimeWarning, _set_figure_size, fig, 3, 3)
+
+        # adapted from SO example here: http://stackoverflow.com/a/3892301
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter("always")
+            _set_figure_size(fig, 3, 3)
+            self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
         npt.assert_array_equal(fig.get_size_inches(), (3, 3))
 
 
diff --git a/skbio/format/sequences/__init__.py b/skbio/format/sequences/__init__.py
deleted file mode 100644
index 77f5ea1..0000000
--- a/skbio/format/sequences/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-r"""
-Format biological sequences (:mod:`skbio.format.sequences`)
-===========================================================
-
-.. currentmodule:: skbio.format.sequences
-
-This module provides functions for writing sequence files in a variety of
-different formats, the available formatters are listed below.
-
-Functions
----------
-
-.. autosummary::
-   :toctree: generated/
-
-    fasta_from_sequences
-    fasta_from_alignment
-    format_fastq_record
-
-"""
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from numpy.testing import Tester
-
-from .fasta import fasta_from_sequences, fasta_from_alignment
-from .fastq import format_fastq_record
-
-__all__ = ['fasta_from_sequences', 'fasta_from_alignment',
-           'format_fastq_record']
-
-
-test = Tester().test
diff --git a/skbio/format/sequences/fasta.py b/skbio/format/sequences/fasta.py
deleted file mode 100644
index bf90a06..0000000
--- a/skbio/format/sequences/fasta.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-import warnings
-
-from skbio.alignment import Alignment
-from skbio.sequence import BiologicalSequence
-
-
-def fasta_from_sequences(seqs, make_seqlabel=None, line_wrap=None):
-    """Returns a FASTA string given a list of sequence objects.
-
-    .. note:: Deprecated in scikit-bio 0.2.0-dev
-       ``fasta_from_sequences`` will be removed in scikit-bio 0.3.0. It is
-       replaced by ``write``, which is a more general method for serializing
-       FASTA-formatted files. ``write`` supports multiple file formats by
-       taking advantage of scikit-bio's I/O registry system. See
-       :mod:`skbio.io` for more details.
-
-    A ``sequence.Label`` attribute takes precedence over ``sequence.Name``.
-
-    Parameters
-    ----------
-    seqs : list
-        seqs can be a list of sequence objects or strings.
-    make_seqlabel : function, optional
-        callback function that takes the seq object and returns a label
-        ``str``. If ``None`` is passed, the following attributes will try to be
-        retrieved in this order and the first to exist will be used:
-        ``id``, ``Label`` or ``Name``. In any other case an integer
-        with the position of the sequence object will be used.
-    line_wrap : int, optional
-        line_wrap: a integer for maximum line width, if ``None`` is passed the
-        full sequence will be used.
-
-    Returns
-    -------
-    str
-        FASTA formatted string composed of the objects passed in via `seqs`.
-
-    See Also
-    --------
-    skbio.parse.sequences.parse_fasta
-
-    Examples
-    --------
-    Formatting a list of sequence objects
-
-    >>> from skbio.format.sequences import fasta_from_sequences
-    >>> from skbio.sequence import DNASequence
-    >>> seqs = [DNASequence('ACTCGAGATC', 'seq1'),
-    ...         DNASequence('GGCCT', 'seq2')]
-    >>> print fasta_from_sequences(seqs)
-    >seq1
-    ACTCGAGATC
-    >seq2
-    GGCCT
-
-    """
-    warnings.warn(
-        "`fasta_from_sequences` is deprecated and will be removed in "
-        "scikit-bio 0.3.0. Please update your code to use `skbio.io.write`.",
-        DeprecationWarning)
-
-    fasta_list = []
-    for i, seq in enumerate(seqs):
-        # Check if it has a label, or one is to be created
-        label = str(i)
-        if make_seqlabel is not None:
-            label = make_seqlabel(seq)
-        elif hasattr(seq, 'id') and seq.id:
-            label = seq.id
-        elif hasattr(seq, 'Label') and seq.Label:
-            label = seq.Label
-        elif hasattr(seq, 'Name') and seq.Name:
-            label = seq.Name
-
-        # wrap sequence lines
-        seq_str = str(seq)
-        if line_wrap is not None:
-            numlines, remainder = divmod(len(seq_str), line_wrap)
-            if remainder:
-                numlines += 1
-            body = [seq_str[j * line_wrap:(j + 1) * line_wrap]
-                    for j in range(numlines)]
-        else:
-            body = [seq_str]
-
-        fasta_list.append('>' + label)
-        fasta_list += body
-
-    return '\n'.join(fasta_list)
-
-
-def fasta_from_alignment(aln, make_seqlabel=None, line_wrap=None, sort=True):
-    """Returns a FASTA string given an alignment object
-
-    .. note:: Deprecated in scikit-bio 0.2.0-dev
-       ``fasta_from_alignment`` will be removed in scikit-bio 0.3.0. It is
-       replaced by ``write``, which is a more general method for serializing
-       FASTA-formatted files. ``write`` supports multiple file formats by
-       taking advantage of scikit-bio's I/O registry system. See
-       :mod:`skbio.io` for more details.
-
-    Parameters
-    ----------
-    aln : Alignment, dict
-        alignment or dictionary where the keys are the sequence ids and
-        the values are the sequences themselves.
-    make_seqlabel : function, optional
-        callback function that takes the seq object and returns a label
-        ``str``. If ``None`` is passed, the following attributes will try to be
-        retrieved in this order and the first to exist will be used:
-        ``id``, ``Label`` or ``Name``. In any other case an integer
-        with the position of the sequence object will be used.
-    line_wrap : int, optional
-        line_wrap: a integer for maximum line width, if ``None`` is passed the
-        full sequence will be used.
-    sort : bool, optional
-        Whether or not the sequences should be sorted by their sequence
-        id, default value is ``True``.
-
-    Returns
-    -------
-    str
-        FASTA formatted string composed of the objects passed in via `seqs`.
-
-    See Also
-    --------
-    skbio.parse.sequences.parse_fasta
-    skbio.alignment.Alignment
-
-    Examples
-    --------
-    Formatting a sequence alignment object into a FASTA file.
-
-    >>> from skbio.alignment import Alignment
-    >>> from skbio.sequence import DNA
-    >>> from skbio.format.sequences import fasta_from_alignment
-    >>> seqs = [DNA("ACC--G-GGTA..", id="seq1"),
-    ...         DNA("TCC--G-GGCA..", id="seqs2")]
-    >>> a1 = Alignment(seqs)
-    >>> print fasta_from_alignment(a1)
-    >seq1
-    ACC--G-GGTA..
-    >seqs2
-    TCC--G-GGCA..
-
-    """
-    warnings.warn(
-        "`fasta_from_alignment` is deprecated and will be removed in "
-        "scikit-bio 0.3.0. Please update your code to use `skbio.io.write` "
-        "or `skbio.Alignment.write`.", DeprecationWarning)
-
-    # check if it's an Alignment object or a dictionary
-    if isinstance(aln, Alignment):
-        order = aln.ids()
-    else:
-        order = aln.keys()
-
-    if sort:
-        order = sorted(order)
-
-    ordered_seqs = []
-    for label in order:
-        seq = aln[label]
-        if isinstance(seq, str):
-            seq = BiologicalSequence(seq, label)
-        ordered_seqs.append(seq)
-    return fasta_from_sequences(ordered_seqs, make_seqlabel=make_seqlabel,
-                                line_wrap=line_wrap)
diff --git a/skbio/format/sequences/fastq.py b/skbio/format/sequences/fastq.py
deleted file mode 100644
index 55a5f2f..0000000
--- a/skbio/format/sequences/fastq.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-import warnings
-
-
-def _phred_to_ascii(a, offset):
-    """Convert Phred quality score to ASCII character with specified offset"""
-    return (a + offset).tostring()
-
-
-def _phred_to_ascii33(a):
-    """Convert Phred quality score to ASCII character with offset of 33"""
-    return _phred_to_ascii(a, 33)
-
-
-def _phred_to_ascii64(a):
-    """Convert Phred quality score to ASCII character with offset of 64"""
-    return _phred_to_ascii(a, 64)
-
-
-def format_fastq_record(seqid, seq, qual, phred_offset=33):
-    """Format a FASTQ record
-
-    .. note:: Deprecated in scikit-bio 0.2.0-dev
-       ``format_fastq_record`` will be removed in scikit-bio 0.3.0. It is
-       replaced by ``write``, which is a more general method for serializing
-       FASTQ-formatted files. ``write`` supports multiple file formats by
-       taking advantage of scikit-bio's I/O registry system. See
-       :mod:`skbio.io` for more details.
-
-    Parameters
-    ----------
-    seqid : bytes
-        The sequence ID
-    seq : bytes or subclass of BiologicalSequence
-        The sequence
-    qual : np.array of int8
-        The quality scores
-    phred_offset : int, either 33 or 64
-        Set a phred offset
-
-    Returns
-    -------
-    bytes : a string representation of a single FASTQ record
-
-    Examples
-    --------
-    >>> from skbio.format.sequences import format_fastq_record
-    >>> from numpy import array, int8
-    >>> seqid = 'seq1'
-    >>> seq = 'AATTGG'
-    >>> qual = array([38, 38, 39, 39, 40, 40], dtype=int8)
-    >>> print format_fastq_record(seqid, seq, qual),
-    @seq1
-    AATTGG
-    +
-    GGHHII
-
-    """
-    warnings.warn(
-        "`format_fastq_record` is deprecated and will be removed in "
-        "scikit-bio 0.3.0. Please update your code to use `skbio.io.write`.",
-        DeprecationWarning)
-
-    if phred_offset == 33:
-        phred_f = _phred_to_ascii33
-    elif phred_offset == 64:
-        phred_f = _phred_to_ascii64
-    else:
-        raise ValueError("Unknown phred offset: %d" % phred_offset)
-
-    return b'\n'.join([b"@" + seqid, seq, b'+', phred_f(qual), b''])
diff --git a/skbio/format/sequences/tests/test_fasta.py b/skbio/format/sequences/tests/test_fasta.py
deleted file mode 100644
index dedc2fc..0000000
--- a/skbio/format/sequences/tests/test_fasta.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python
-"""Tests for FASTA sequence format writer.
-"""
-from unittest import TestCase, main
-
-from skbio import DNASequence, BiologicalSequence, Alignment
-from skbio.format.sequences import fasta_from_sequences, fasta_from_alignment
-
-
-class FastaTests(TestCase):
-
-    """Tests for Fasta writer.
-    """
-
-    def setUp(self):
-        """Setup for Fasta tests."""
-        self.strings = ['AAAA', 'CCCC', 'gggg', 'uuuu']
-        self.fasta_no_label = '>0\nAAAA\n>1\nCCCC\n>2\ngggg\n>3\nuuuu'
-        self.fasta_with_label =\
-            '>1st\nAAAA\n>2nd\nCCCC\n>3rd\nGGGG\n>4th\nUUUU'
-        self.fasta_with_label_lw2 =\
-            '>1st\nAA\nAA\n>2nd\nCC\nCC\n>3rd\nGG\nGG\n>4th\nUU\nUU'
-        self.alignment_dict = {'1st': 'AAAA', '2nd': 'CCCC', '3rd': 'GGGG',
-                               '4th': 'UUUU'}
-        self.sequence_objects_a = [DNASequence('ACTCGAGATC', 'seq1'),
-                                   DNASequence('GGCCT', 'seq2')]
-        self.sequence_objects_b = [BiologicalSequence('ACTCGAGATC', 'seq1'),
-                                   BiologicalSequence('GGCCT', 'seq2')]
-        seqs = [DNASequence("ACC--G-GGTA..", id="seq1"),
-                DNASequence("TCC--G-GGCA..", id="seqs2")]
-        self.alignment = Alignment(seqs)
-
-    def test_fasta_from_sequence_objects(self):
-        """Check FASTA files are created correctly off of sequence objects"""
-        self.assertEqual(fasta_from_sequences(self.sequence_objects_a),
-                         FASTA_STRING)
-
-        self.assertEqual(fasta_from_sequences(self.sequence_objects_b),
-                         FASTA_STRING)
-
-    def test_fasta_from_sequences(self):
-        """should return correct fasta string."""
-        self.assertEqual(fasta_from_sequences(''), '')
-        self.assertEqual(fasta_from_sequences(self.strings),
-                         self.fasta_no_label)
-
-    def test_fasta_from_alignment(self):
-        """should return correct fasta string."""
-        self.assertEqual(fasta_from_alignment({}), '')
-        self.assertEqual(fasta_from_alignment(self.alignment_dict),
-                         self.fasta_with_label)
-        self.assertEqual(fasta_from_alignment(self.alignment_dict,
-                                              line_wrap=2),
-                         self.fasta_with_label_lw2)
-
-    def test_fasta_from_alignment_from_alignment(self):
-        """should return correct fasta string for alignment object"""
-        # alignment with a few sequences
-        obs = fasta_from_alignment(self.alignment)
-        self.assertEquals('>seq1\nACC--G-GGTA..\n>seqs2\nTCC--G-GGCA..', obs)
-
-        # empty alginment
-        obs = fasta_from_alignment(Alignment([]))
-        self.assertEquals('', obs)
-
-        # alignment with a few sequences
-        obs = fasta_from_alignment(self.alignment, sort=False)
-        self.assertEquals('>seq1\nACC--G-GGTA..\n>seqs2\nTCC--G-GGCA..', obs)
-
-
-FASTA_STRING = '>seq1\nACTCGAGATC\n>seq2\nGGCCT'
-
-if __name__ == "__main__":
-    main()
diff --git a/skbio/format/sequences/tests/test_fastq.py b/skbio/format/sequences/tests/test_fastq.py
deleted file mode 100644
index cecad1f..0000000
--- a/skbio/format/sequences/tests/test_fastq.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env python
-
-import numpy as np
-from unittest import TestCase, main
-
-from skbio.format.sequences import format_fastq_record
-from skbio.format.sequences.fastq import _phred_to_ascii33, _phred_to_ascii64
-
-
-class FASTQFormatTests(TestCase):
-    def setUp(self):
-        self.qual_scores = np.array([38, 39, 40], dtype=np.int8)
-        self.args = (b'abc', b'def', self.qual_scores)
-
-    def test_format_fastq_record_phred_offset_33(self):
-        exp = b"@abc\ndef\n+\nGHI\n"
-        obs = format_fastq_record(*self.args, phred_offset=33)
-        self.assertEqual(obs, exp)
-
-    def test_format_fastq_record_phred_offset_64(self):
-        exp = b"@abc\ndef\n+\nfgh\n"
-        obs = format_fastq_record(*self.args, phred_offset=64)
-        self.assertEqual(obs, exp)
-
-    def test_format_fastq_record_invalid_phred_offset(self):
-        with self.assertRaises(ValueError):
-            format_fastq_record(*self.args, phred_offset=42)
-
-    def test_phred_to_ascii33(self):
-        obs = _phred_to_ascii33(self.qual_scores)
-        self.assertEqual(obs, b'GHI')
-
-    def test_phred_to_ascii64(self):
-        obs = _phred_to_ascii64(self.qual_scores)
-        self.assertEqual(obs, b'fgh')
-
-
-if __name__ == '__main__':
-    main()
diff --git a/skbio/io/__init__.py b/skbio/io/__init__.py
index 5bf5080..71600d1 100644
--- a/skbio/io/__init__.py
+++ b/skbio/io/__init__.py
@@ -6,6 +6,71 @@ File I/O (:mod:`skbio.io`)
 
 This package provides I/O functionality for skbio.
 
+Supported file formats
+----------------------
+For details on what objects are supported by each format,
+see the associated documentation.
+
+.. currentmodule:: skbio.io.format
+.. autosummary::
+   :toctree: generated/
+
+   clustal
+   fasta
+   fastq
+   lsmat
+   newick
+   ordination
+   phylip
+   qseq
+
+.. currentmodule:: skbio.io.registry
+
+
+User functions
+--------------
+
+.. autosummary::
+   :toctree: generated/
+
+   write
+   read
+   sniff
+
+.. currentmodule:: skbio.io
+
+User exceptions and warnings
+----------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   FormatIdentificationWarning
+   ArgumentOverrideWarning
+   UnrecognizedFormatError
+   IOSourceError
+   FileFormatError
+   ClustalFormatError
+   FASTAFormatError
+   FASTQFormatError
+   LSMatFormatError
+   NewickFormatError
+   OrdinationFormatError
+   PhylipFormatError
+   QSeqFormatError
+   QUALFormatError
+
+Subpackages
+-----------
+
+.. autosummary::
+   :toctree: generated/
+
+   registry
+   util
+
+For developer documentation on extending I/O, see :mod:`skbio.io.registry`.
+
 Introduction to I/O
 -------------------
 Reading and writing files (I/O) can be a complicated task:
@@ -17,7 +82,7 @@ Reading and writing files (I/O) can be a complicated task:
   your data.
 * A single object might be writeable to more than one file format. For example,
   an :mod:`skbio.alignment.Alignment` object could be written to FASTA, FASTQ,
-  QSEQ, PHYLIP, or Stockholm formats, just to name a few.
+  QSEQ, or PHYLIP formats, just to name a few.
 * You might not know the exact file format of your file, but you want to read
   it into an appropriate object.
 * You might want to read multiple files into a single object, or write an
@@ -27,22 +92,28 @@ Reading and writing files (I/O) can be a complicated task:
 
 To address these issues (and others), scikit-bio provides a simple, powerful
 interface for dealing with I/O. We accomplish this by using a single I/O
-registry. Below is a description of how to use the registry and how to extend
-it.
+registry.
+
+What kinds of files scikit-bio can use
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+To see a complete list of file-like inputs that can be used for reading,
+writing, and sniffing, see the documentation for :func:`skbio.io.util.open`.
 
 Reading files into scikit-bio
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 There are two ways to read files. The first way is to use the
 procedural interface:
 
-``my_obj = skbio.io.read(<filehandle or filepath>, format='<format here>',
-into=<class to construct>)``
+.. code-block:: python
+
+   my_obj = skbio.io.read(file, format='someformat', into=SomeSkbioClass)
 
 The second is to use the object-oriented (OO) interface which is automatically
 constructed from the procedural interface:
 
-``my_obj = <class to construct>.read(<filehandle or filepath>,
-format='<format here>')``
+.. code-block:: python
+
+   my_obj = SomeSkbioClass.read(file, format='someformat')
 
 For example, to read a `newick` file using both interfaces you would type:
 
@@ -61,9 +132,9 @@ For the OO interface:
 >>> tree
 <TreeNode, name: unnamed, internal node count: 0, tips count: 2>
 
-In the case of ``skbio.io.read`` if `into` is not provided, then a generator
-will be returned. What the generator yields will depend on what format is being
-read.
+In the case of :func:`skbio.io.registry.read` if `into` is not provided, then a
+generator will be returned. What the generator yields will depend on what
+format is being read.
 
 When `into` is provided, format may be omitted and the registry will use its
 knowledge of the available formats for the requested class to infer the correct
@@ -77,11 +148,10 @@ As an example:
 >>> tree
 <TreeNode, name: unnamed, internal node count: 0, tips count: 2>
 
-We call format inference `sniffing`, much like the
-`csv <https://docs.python.org/2/library/csv.html#csv.Sniffer>`_ module of
-Python's standard library. The goal of a `sniffer` is twofold: to identify if a
-file is a specific format, and if it is, to provide `**kwargs` which can be
-used to better parse the file.
+We call format inference `sniffing`, much like the :class:`csv.Sniffer`
+class of Python's standard library. The goal of a `sniffer` is twofold: to
+identify if a file is a specific format, and if it is, to provide `**kwargs`
+which can be used to better parse the file.
 
 .. note:: There is a built-in `sniffer` which results in a useful error message
    if an empty file is provided as input and the format was omitted.
@@ -92,172 +162,23 @@ Just as when reading files, there are two ways to write files.
 
 Procedural Interface:
 
-``skbio.io.write(my_obj, format='<format here>',
-into=<filehandle or filepath>)``
-
-OO Interface:
-
-``my_obj.write(<filehandle or filepath>, format='<format here>')``
-
-In the procedural interface, `format` is required. Without it, scikit-bio does
-not know how you want to serialize an object. OO interfaces define a default
-`format`, so it may not be necessary to include it.
-
-Supported file formats
-^^^^^^^^^^^^^^^^^^^^^^
-For details on what objects are supported by each format,
-see the associated documentation.
-
-.. autosummary::
-   :toctree: generated/
-
-   clustal
-   fasta
-   fastq
-   lsmat
-   newick
-   ordination
-   phylip
-   qseq
-
-Formats are considered to be names which represent a way of encoding a file.
-
-User functions
-^^^^^^^^^^^^^^
-
-.. autosummary::
-   :toctree: generated/
-
-   write
-   read
-   sniff
-
-User exceptions
-^^^^^^^^^^^^^^^
-
-.. autosummary::
-   :toctree: generated/
-
-   RecordError
-   FieldError
-   UnrecognizedFormatError
-   FileFormatError
-   ClustalFormatError
-   FASTAFormatError
-   FASTQFormatError
-   LSMatFormatError
-   NewickFormatError
-   OrdinationFormatError
-   PhylipFormatError
-   QSeqFormatError
-
-User warnings
-^^^^^^^^^^^^^
-
-.. autosummary::
-   :toctree: generated/
-
-   FormatIdentificationWarning
-   ArgumentOverrideWarning
-
-Developer Documentation
------------------------
-To extend I/O in skbio, developers should create a submodule in `skbio/io/`
-named after the file format it implements.
-
-For example, if you were to create readers and writers for a `fasta` file, you
-would create a submodule `skbio/io/fasta.py`.
-In this submodule you would use the following decorators:
-``register_writer``, ``register_reader``, and ``register_sniffer``.
-These associate your functionality to a format string and potentially an skbio
-class. Please see the relevant documenation for more information about these
-functions and the specifications for `readers`, `writers`, and `sniffers`.
-
-Once you are satisfied with the functionality, you will need to ensure that
-`skbio/io/__init__.py` contains an import of your new submodule so the
-decorators are executed on importing the user functions above. Use the function
-``import_module('skbio.io.my_new_format')``.
-
-The following keyword args may not be used when defining new `readers` or
-`writers` as they already have special meaning to the registry system:
-
-- `format`
-- `into`
-- `mode`
-- `verify`
-
-If a keyword argument is a file, such as in the case of `fasta` with `qual`,
-then you can set the default to a specific marker, or sentinel, to indicate to
-the registry that the kwarg should have special handling. For example:
-
 .. code-block:: python
 
-   from skbio.io import FileSentinel
-
-   @register_reader(fasta, object)
-   def fasta_to_object(fh, qual=FileSentinel):
-       ...
-
-After the registry reads your function, it will replace `FileSentinel` with
-`None` allowing you to perform normal checks for kwargs
-(e.g. `if my_kwarg is not None:`). If a user provides input for the kwarg, the
-registry will convert it to an open filehandle.
-
-.. note:: Keyword arguments are not permitted in `sniffers`. `Sniffers` may not
-   raise exceptions; if an exception is thrown by a `sniffer`, the user will be
-   asked to report it on our `issue tracker
-   <https://github.com/biocore/scikit-bio/issues/>`_.
+   skbio.io.write(my_obj, format='someformat', into=file)
 
-When raising errors in readers and writers, the error should be a subclass of
-``FileFormatError`` specific to your new format.
-
-Writing unit tests
-^^^^^^^^^^^^^^^^^^
-Because scikit-bio handles all of the I/O boilerplate, you only need to test
-the actual business logic of your `readers`, `writers`, and `sniffers`. The
-easiest way to accomplish this is to create a list of files and their expected
-results when deserialized. Then you can iterate through the list ensuring the
-expected results occur and that the expected results can be reserialized into
-an equivalent file. This process is called 'roundtripping'.
-
-It is also important to test some invalid inputs and ensure that the correct
-error is raised by your `readers`. Consider using `assertRaises` as a context
-manager like so:
+OO Interface:
 
 .. code-block:: python
 
-   with self.assertRaises(SomeFileFormatErrorSubclass) as cm:
-       do_something_wrong()
-   self.assertIn('action verb or subject of an error', str(cm.exception))
-
-A good example to review when preparing to write your first I/O unit tests is
-the ordination test code (see in ``skbio/io/tests/test_ordination.py``).
+   my_obj.write(file, format='someformat')
 
-Developer functions
-^^^^^^^^^^^^^^^^^^^
-
-.. autosummary::
-    :toctree: generated/
-
-    register_writer
-    register_reader
-    register_sniffer
-    list_write_formats
-    list_read_formats
-    get_writer
-    get_reader
-    get_sniffer
-
-Developer exceptions
-^^^^^^^^^^^^^^^^^^^^
-
-.. autosummary::
-   :toctree: generated/
+In the procedural interface, `format` is required. Without it, scikit-bio does
+not know how you want to serialize an object. OO interfaces define a default
+`format`, so it may not be necessary to include it.
 
-   DuplicateRegistrationError
-   InvalidRegistrationError
 
 """
+
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -266,32 +187,25 @@ Developer exceptions
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 from importlib import import_module
 
-from numpy.testing import Tester
+from skbio.util import TestRunner
 
 from ._warning import FormatIdentificationWarning, ArgumentOverrideWarning
-from ._exception import (DuplicateRegistrationError, InvalidRegistrationError,
-                         RecordError, FieldError, UnrecognizedFormatError,
-                         FileFormatError, ClustalFormatError, FASTAFormatError,
-                         FASTQFormatError, LSMatFormatError, NewickFormatError,
-                         OrdinationFormatError, PhylipFormatError,
-                         QSeqFormatError)
-from ._registry import (write, read, sniff, get_writer, get_reader,
-                        get_sniffer, list_write_formats, list_read_formats,
-                        register_writer, register_reader, register_sniffer,
-                        initialize_oop_interface, FileSentinel)
-
-__all__ = ['write', 'read', 'sniff',
-           'list_write_formats', 'list_read_formats',
-           'get_writer', 'get_reader', 'get_sniffer',
-           'register_writer', 'register_reader', 'register_sniffer',
-           'initialize_oop_interface', 'FileSentinel',
+from ._exception import (UnrecognizedFormatError, FileFormatError,
+                         ClustalFormatError, FASTAFormatError,
+                         IOSourceError, FASTQFormatError, LSMatFormatError,
+                         NewickFormatError, OrdinationFormatError,
+                         PhylipFormatError, QSeqFormatError, QUALFormatError)
+from .registry import write, read, sniff, create_format, io_registry
+from .util import open
 
-           'FormatIdentificationWarning', 'ArgumentOverrideWarning',
+__all__ = ['write', 'read', 'sniff', 'open', 'io_registry', 'create_format',
 
-           'DuplicateRegistrationError', 'InvalidRegistrationError',
-           'RecordError', 'FieldError', 'UnrecognizedFormatError',
+           'FormatIdentificationWarning', 'ArgumentOverrideWarning',
+           'UnrecognizedFormatError', 'IOSourceError',
 
            'FileFormatError',
            'ClustalFormatError',
@@ -301,22 +215,29 @@ __all__ = ['write', 'read', 'sniff',
            'NewickFormatError',
            'OrdinationFormatError',
            'PhylipFormatError',
-           'QSeqFormatError']
+           'QSeqFormatError',
+           'QUALFormatError']
+
 
 # Necessary to import each file format module to have them added to the I/O
 # registry. We use import_module instead of a typical import to avoid flake8
 # unused import errors.
-import_module('skbio.io.clustal')
-import_module('skbio.io.fasta')
-import_module('skbio.io.fastq')
-import_module('skbio.io.lsmat')
-import_module('skbio.io.newick')
-import_module('skbio.io.ordination')
-import_module('skbio.io.phylip')
-import_module('skbio.io.qseq')
+import_module('skbio.io.format.clustal')
+import_module('skbio.io.format.fasta')
+import_module('skbio.io.format.fastq')
+import_module('skbio.io.format.lsmat')
+import_module('skbio.io.format.newick')
+import_module('skbio.io.format.ordination')
+import_module('skbio.io.format.phylip')
+import_module('skbio.io.format.qseq')
+
+
+# This is meant to be a handy indicator to the user that they have done
+# something wrong.
+import_module('skbio.io.format.emptyfile')
 
 # Now that all of our I/O has loaded, we can add the object oriented methods
 # (read and write) to each class which has registered I/O operations.
-initialize_oop_interface()
+io_registry.monkey_patch()
 
-test = Tester().test
+test = TestRunner(__file__).test
diff --git a/skbio/io/_exception.py b/skbio/io/_exception.py
index 66c1b31..36af916 100644
--- a/skbio/io/_exception.py
+++ b/skbio/io/_exception.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -8,19 +6,16 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-
-class FileFormatError(Exception):
-    """Raised when a file cannot be parsed."""
-    pass
+from __future__ import absolute_import, division, print_function
 
 
-class RecordError(FileFormatError):
-    """Raised when a record is bad."""
+class IOSourceError(Exception):
+    """Raised when a file source cannot be resolved."""
     pass
 
 
-class FieldError(RecordError):
-    """Raised when a field within a record is bad."""
+class FileFormatError(Exception):
+    """Raised when a file cannot be parsed."""
     pass
 
 
@@ -39,6 +34,11 @@ class FASTAFormatError(FileFormatError):
     pass
 
 
+class QUALFormatError(FASTAFormatError):
+    """Raised when a ``qual`` formatted file cannot be parsed."""
+    pass
+
+
 class LSMatFormatError(FileFormatError):
     """Raised when a ``lsmat`` formatted file cannot be parsed."""
     pass
@@ -81,15 +81,4 @@ class InvalidRegistrationError(Exception):
 
 class DuplicateRegistrationError(Exception):
     """Raised when a function is already registered in skbio.io"""
-
-    def __init__(self, name=None, fmt=None, cls=None, msg=None):
-        super(DuplicateRegistrationError, self).__init__()
-        if msg:
-            self.args = (msg,)
-        else:
-            if hasattr(cls, '__name__'):
-                classname = cls.__name__
-            else:
-                classname = 'generator'
-            self.args = ("'%s' already has a %s for %s."
-                         % (fmt, name, classname),)
+    pass
diff --git a/skbio/io/_fileobject.py b/skbio/io/_fileobject.py
new file mode 100644
index 0000000..80428bd
--- /dev/null
+++ b/skbio/io/_fileobject.py
@@ -0,0 +1,139 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import io
+
+
+def is_binary_file(file):
+    return isinstance(file, (io.BufferedReader, io.BufferedWriter,
+                             io.BufferedRandom))
+
+# Everything beyond this point will be some kind of hack needed to make
+# everything work. It's not pretty and it doesn't make great sense much
+# of the time. I am very sorry to the poor soul who has to read beyond.
+
+
+class StringIO(io.StringIO):
+    """Treat Bytes the same as Unicode by decoding ascii, for testing only."""
+    def __init__(self, string=None, **kwargs):
+        if isinstance(string, bytes):
+            string = string.decode()
+        super(StringIO, self).__init__(string, **kwargs)
+
+
+class SaneTextIOWrapper(io.TextIOWrapper):
+    def __init__(self, *args, **kwargs):
+        super(SaneTextIOWrapper, self).__init__(*args, **kwargs)
+        self._should_close_buffer = True
+
+    def __del__(self):
+        # Accept the inevitability of the buffer being closed by the destructor
+        # because of this line in Python 2.7:
+        # https://github.com/python/cpython/blob/2.7/Modules/_io/iobase.c#L221
+        self._should_close_buffer = False
+        # Actually close for Python 3 because it is an override.
+        # We can't call super because Python 2 doesn't actually
+        # have a `__del__` method for IOBase (hence this
+        # workaround). Close is idempotent so it won't matter
+        # that Python 2 will end up calling this twice
+        self.close()
+
+    def close(self):
+        # We can't stop Python 2.7 from calling close in the destructor
+        # so instead we can prevent the buffer from being closed with a flag.
+
+        # Based on:
+        # https://github.com/python/cpython/blob/2.7/Lib/_pyio.py#L1586
+        if self.buffer is not None and not self.closed:
+            try:
+                self.flush()
+            finally:
+                if self._should_close_buffer:
+                    self.buffer.close()
+
+
+class WrappedBufferedRandom(io.BufferedRandom):
+    def __init__(self, *args, **kwargs):
+        super(WrappedBufferedRandom, self).__init__(*args, **kwargs)
+        self._should_close_raw = True
+
+    def __del__(self):
+        self._should_close_raw = False
+        self.close()
+
+    # Based on:
+    # https://github.com/python/cpython/blob/2.7/Lib/_pyio.py#L732
+    def close(self):
+        if self.raw is not None and not self.closed:
+            try:
+                # may raise BlockingIOError or BrokenPipeError etc
+                self.flush()
+            finally:
+                if self._should_close_raw:
+                    self.raw.close()
+
+
+class CompressedMixin(object):
+    """Act as a bridge between worlds"""
+    def __init__(self, before_file, *args, **kwargs):
+        self.streamable = kwargs.pop('streamable', True)
+        self._should_close_raw = True
+        self._before_file = before_file
+        super(CompressedMixin, self).__init__(*args, **kwargs)
+
+    def __del__(self):
+        self._should_close_raw = False
+        self.close()
+
+    @property
+    def closed(self):
+        return self.raw.closed or self._before_file.closed
+
+    # Based on:
+    # https://github.com/python/cpython/blob/2.7/Lib/_pyio.py#L732
+    def close(self):
+        if self.raw is not None and not self.closed:
+            try:
+                # may raise BlockingIOError or BrokenPipeError etc
+                self.flush()
+            finally:
+                if self._should_close_raw:
+                    self.raw.close()
+                    # The above will not usually close the before_file
+                    # We want the decompression to be transparent, so we don't
+                    # want users to deal with this edge case. Instead we can
+                    # just close the original now that we are being closed.
+                    self._before_file.close()
+
+
+class CompressedBufferedReader(CompressedMixin, io.BufferedReader):
+    pass
+
+
+class CompressedBufferedWriter(CompressedMixin, io.BufferedWriter):
+    pass
+
+
+class IterableStringReaderIO(io.StringIO):
+    def __init__(self, iterable, newline):
+        self._iterable = iterable
+        super(IterableStringReaderIO, self).__init__(u''.join(iterable),
+                                                     newline=newline)
+
+
+class IterableStringWriterIO(IterableStringReaderIO):
+    def close(self):
+        if not self.closed:
+            backup = self.tell()
+            self.seek(0)
+            for line in self:
+                self._iterable.append(line)
+            self.seek(backup)
+        super(IterableStringWriterIO, self).close()
diff --git a/skbio/io/_iosources.py b/skbio/io/_iosources.py
new file mode 100644
index 0000000..2e92d5c
--- /dev/null
+++ b/skbio/io/_iosources.py
@@ -0,0 +1,237 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from six import string_types, text_type
+
+import io
+import gzip
+import bz2file
+from tempfile import gettempdir
+import itertools
+
+import requests
+from cachecontrol import CacheControl
+from cachecontrol.caches import FileCache
+
+from skbio.io import IOSourceError
+from ._fileobject import (IterableStringWriterIO, IterableStringReaderIO,
+                          WrappedBufferedRandom)
+
+
+def get_io_sources():
+    return (
+        # The order of these sources is significant as they will short-circuit
+        HTTPSource,
+        FilePathSource,
+        BytesIOSource,
+        BufferedIOSource,
+        TextIOSource,
+        IterableSource
+    )
+
+
+def _compressors():
+    return (
+        GzipCompressor,
+        BZ2Compressor
+    )
+
+
+def get_compression_handler(name):
+    compressors = {c.name: c for c in _compressors()}
+    compressors['auto'] = AutoCompressor
+    return compressors.get(name, False)
+
+
+class IOSource(object):
+    closeable = True
+
+    def __init__(self, file, options):
+        self.file = file
+        self.options = options
+
+    def can_read(self):
+        return False
+
+    def can_write(self):
+        return False
+
+    def get_reader(self):
+        raise NotImplementedError()
+
+    def get_writer(self):
+        raise NotImplementedError()
+
+
+class Compressor(IOSource):
+    streamable = True
+    name = ''
+
+    def can_write(self):
+        return True
+
+
+class FilePathSource(IOSource):
+    def can_read(self):
+        return isinstance(self.file, string_types)
+
+    def can_write(self):
+        return self.can_read()
+
+    def get_reader(self):
+        return io.open(self.file, mode='rb')
+
+    def get_writer(self):
+        return io.open(self.file, mode='wb')
+
+
+class HTTPSource(IOSource):
+    def can_read(self):
+        return (
+            isinstance(self.file, string_types) and
+            requests.compat.urlparse(self.file).scheme in {'http', 'https'})
+
+    def get_reader(self):
+        sess = CacheControl(requests.Session(),
+                            cache=FileCache(gettempdir()))
+        req = sess.get(self.file)
+
+        # if the response is not 200, an exception will be raised
+        req.raise_for_status()
+
+        return io.BufferedReader(io.BytesIO(req.content))
+
+
+class BytesIOSource(IOSource):
+    closeable = False
+
+    def can_read(self):
+        return isinstance(self.file, io.BytesIO)
+
+    def can_write(self):
+        return self.can_read()
+
+    def get_reader(self):
+        return WrappedBufferedRandom(self.file)
+
+    def get_writer(self):
+        return self.get_reader()
+
+
+class BufferedIOSource(IOSource):
+    closeable = False
+
+    def can_read(self):
+        # `peek` is part of the API we want to guarantee, so we can't just look
+        # for io.BufferedIOBase. Despite the fact that the C implementation of
+        # io.BufferedRandom inherits io.BufferedReader/Writer it is not
+        # reflected in an isinstance check, so we need to check for it manually
+        return isinstance(self.file, (io.BufferedReader, io.BufferedRandom))
+
+    def can_write(self):
+        return isinstance(self.file, (io.BufferedWriter, io.BufferedRandom))
+
+    def get_reader(self):
+        return self.file
+
+    def get_writer(self):
+        return self.file
+
+
+class TextIOSource(IOSource):
+    closeable = False
+
+    def can_read(self):
+        return isinstance(self.file, io.TextIOBase) and self.file.readable()
+
+    def can_write(self):
+        return isinstance(self.file, io.TextIOBase) and self.file.writable()
+
+    def get_reader(self):
+        return self.file
+
+    def get_writer(self):
+        return self.file
+
+
+class IterableSource(IOSource):
+    def can_read(self):
+        if hasattr(self.file, '__iter__'):
+            iterator = iter(self.file)
+            head = next(iterator, None)
+            if head is None:
+                self.repaired = []
+                return True
+            if isinstance(head, text_type):
+                self.repaired = itertools.chain([head], iterator)
+                return True
+            else:
+                # We may have mangled a generator at this point, so just abort
+                raise IOSourceError(
+                    "Could not open source: %r (mode: %r)" %
+                    (self.file, self.options['mode']))
+        return False
+
+    def can_write(self):
+        return hasattr(self.file, 'append') and hasattr(self.file, '__iter__')
+
+    def get_reader(self):
+        return IterableStringReaderIO(self.repaired,
+                                      newline=self.options['newline'])
+
+    def get_writer(self):
+        return IterableStringWriterIO(self.file,
+                                      newline=self.options['newline'])
+
+
+class GzipCompressor(Compressor):
+    name = 'gzip'
+    streamable = True
+
+    def can_read(self):
+        return self.file.peek(2)[:2] == b'\x1f\x8b'
+
+    def get_reader(self):
+        return gzip.GzipFile(fileobj=self.file)
+
+    def get_writer(self):
+        return gzip.GzipFile(fileobj=self.file, mode='wb',
+                             compresslevel=self.options['compresslevel'])
+
+
+class BZ2Compressor(Compressor):
+    name = 'bz2'
+    streamable = False
+
+    def can_read(self):
+        return self.file.peek(3)[:3] == b'BZh'
+
+    def get_reader(self):
+        return bz2file.BZ2File(self.file, mode='rb')
+
+    def get_writer(self):
+        return bz2file.BZ2File(self.file, mode='wb',
+                               compresslevel=self.options['compresslevel'])
+
+
+class AutoCompressor(Compressor):
+    streamable = True  # We can't write, so it doesn't matter
+    name = 'auto'
+
+    def get_reader(self):
+        for compression_handler in _compressors():
+            compressor = compression_handler(self.file, self.options)
+            if compressor.can_read():
+                return compressor.get_reader()
+
+        return self.file
+
+    def get_writer(self):
+        return self.file
diff --git a/skbio/io/_registry.py b/skbio/io/_registry.py
deleted file mode 100644
index 1d413b6..0000000
--- a/skbio/io/_registry.py
+++ /dev/null
@@ -1,818 +0,0 @@
-from __future__ import absolute_import, division, print_function
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from warnings import warn
-import types
-import copy
-import traceback
-import inspect
-
-from future.builtins import zip
-
-from . import (UnrecognizedFormatError, InvalidRegistrationError,
-               DuplicateRegistrationError, ArgumentOverrideWarning,
-               FormatIdentificationWarning)
-from .util import open_file, open_files
-
-_formats = {}
-_sniffers = {}
-_aliases = {}
-_empty_file_format = '<emptyfile>'
-
-# We create a class and instantiate it dynamically so that exceptions are more
-# obvious and so that only one object exists without copying this line.
-FileSentinel = type('FileSentinel', (object, ), {})()
-
-
-def _override_kwargs(kw, fmt_kw, warn_user):
-    for key in kw:
-        if key in fmt_kw and fmt_kw[key] != kw[key] and warn_user:
-            warn('Best guess was: %s=%s, continuing with user supplied: %s' % (
-                key, str(fmt_kw[key]), str(kw[key])
-            ), ArgumentOverrideWarning)
-        fmt_kw[key] = kw[key]
-    return fmt_kw
-
-
-def register_sniffer(format):
-    """Return a decorator for a sniffer function.
-
-    A decorator factory for sniffer functions. Sniffers may only be registered
-    to simple formats. Sniffers for compound formats are automatically
-    generated from their component simple formats.
-
-    A sniffer function should have at least the following signature:
-    ``<format_name>_sniffer(fh)``. `fh` is **always** an open filehandle.
-    This decorator provides the ability to use filepaths in the same argument
-    position as `fh`. They will automatically be opened and closed.
-
-    **The sniffer must not close the filehandle**, cleanup will be
-    handled external to the sniffer and is not its concern.
-
-    `**kwargs` are not passed to a sniffer, and a sniffer must not use them.
-
-    The job of a sniffer is to determine if a file appears to be in the given
-    format and to 'sniff' out any kwargs that would be of use to a reader
-    function.
-
-    The sniffer **must** return a tuple of (True, <kwargs dict>) if it believes
-    `fh` is a given `format`. Otherwise it should return (False, {}).
-
-    .. note:: Failure to adhere to the above interface specified for a sniffer
-       will result in unintended side-effects.
-
-    The sniffer may determine membership of a file in as many or as few
-    lines of the file as it deems necessary.
-
-    Parameters
-    ----------
-    format : str
-        A format name which a decorated sniffer will be bound to.
-
-    Returns
-    -------
-    function
-        A decorator to be used on a sniffer. The decorator will raise a
-        ``skbio.io.DuplicateRegistrationError`` if there already exists a
-        *sniffer* bound to the `format`.
-
-    See Also
-    --------
-    skbio.io.sniff
-
-    """
-    def decorator(sniffer):
-        if format in _sniffers:
-            raise DuplicateRegistrationError(msg="'%s' already has a sniffer."
-                                             % format)
-
-        def wrapped_sniffer(fp, mode='U', **kwargs):
-            with open_file(fp, mode) as fh:
-                # The reason we do a copy is because we need the sniffer to not
-                # mutate the orginal file while guessing the format. The
-                # naive solution would be to seek to 0 at the end, but that
-                # would break an explicit offset provided by the user. Instead
-                # we create a shallow copy which works out of the box for
-                # file-like object, but does not work for real files. Instead
-                # the name attribute is reused in open for a new filehandle.
-                # Using seek and tell is not viable because in real files tell
-                # reflects the position of the read-ahead buffer and not the
-                # true offset of the iterator.
-                if hasattr(fh, 'name'):
-                    cfh = open(fh.name, fh.mode)
-                else:
-                    cfh = copy.copy(fh)
-                    cfh.seek(0)
-                try:
-                    return sniffer(cfh, **kwargs)
-                except Exception:
-                    warn("'%s' has encountered a problem.\n"
-                         "Please send the following to our issue tracker at\n"
-                         "https://github.com/biocore/scikit-bio/issues\n\n"
-                         "%s" % (sniffer.__name__, traceback.format_exc()),
-                         FormatIdentificationWarning)
-                    return False, {}
-                finally:
-                    cfh.close()
-
-        wrapped_sniffer.__doc__ = sniffer.__doc__
-        wrapped_sniffer.__name__ = sniffer.__name__
-
-        _sniffers[format] = wrapped_sniffer
-        return wrapped_sniffer
-    return decorator
-
-
-def register_reader(format, cls=None):
-    """Return a decorator for a reader function.
-
-    A decorator factory for reader functions.
-
-    A reader function should have at least the following signature:
-    ``<format_name>_to_<class_name_or_generator>(fh)``. `fh` is **always** an
-    open filehandle. This decorator provides the ability to use filepaths in
-    the same argument position as `fh`. They will automatically be opened and
-    closed.
-
-    **The reader must not close the filehandle**, cleanup will be
-    handled external to the reader and is not its concern. This is true even
-    in the case of generators.
-
-    Any additional `**kwargs` will be passed to the reader and may
-    be used if necessary.
-
-    The reader **must** return an instance of `cls` if `cls` is not None.
-    Otherwise the reader must return a generator. The generator need not deal
-    with closing the `fh`. That is already handled by this decorator.
-
-    .. note:: Failure to adhere to the above interface specified for a reader
-       will result in unintended side-effects.
-
-    Parameters
-    ----------
-    format : str
-        A format name which a decorated reader will be bound to.
-    cls : type, optional
-        The class which a decorated reader will be bound to. When `cls` is None
-        the reader will be bound as returning a generator.
-        Default is None.
-
-    Returns
-    -------
-    function
-        A decorator to be used on a reader. The decorator will raise a
-        ``skbio.io.DuplicateRegistrationError`` if there already exists a
-        *reader* bound to the same permutation of `fmt` and `cls`.
-
-    See Also
-    --------
-    skbio.io.read
-
-    """
-    def decorator(reader):
-        format_class = _formats.setdefault(format, {}).setdefault(cls, {})
-
-        if 'reader' in format_class:
-            raise DuplicateRegistrationError('reader', format, cls)
-
-        file_args = []
-        reader_spec = inspect.getargspec(reader)
-        if reader_spec.defaults is not None:
-            # Concept from http://stackoverflow.com/a/12627202/579416
-            for key, default in zip(
-                    reader_spec.args[-len(reader_spec.defaults):],
-                    reader_spec.defaults):
-                if default is FileSentinel:
-                    file_args.append(key)
-
-        # We wrap the reader so that basic file handling can be managed
-        # externally from the business logic.
-        if cls is None:
-            def wrapped_reader(fp, mode='U', mutate_fh=False, **kwargs):
-                file_keys = []
-                files = [fp]
-                for file_arg in file_args:
-                    if file_arg in kwargs:
-                        if kwargs[file_arg] is not None:
-                            file_keys.append(file_arg)
-                            files.append(kwargs[file_arg])
-                    else:
-                        kwargs[file_arg] = None
-
-                with open_files(files, mode) as fhs:
-                    try:
-                        for key, fh in zip(file_keys, fhs[1:]):
-                            kwargs[key] = fh
-
-                        generator = reader(fhs[0], **kwargs)
-                        if not isinstance(generator, types.GeneratorType):
-                            # Raise an exception to be handled next line,
-                            # because although reader executed without error,
-                            # it is not a generator.
-                            raise Exception()
-                    # If an exception is thrown at this point, it cannot
-                    # be a generator. If there was a `yield` statment, then
-                    # Python would have returned a generator regardless of the
-                    # content. This does not preclude the generator from
-                    # throwing exceptions.
-                    except Exception:
-                            raise InvalidRegistrationError("'%s' is not a "
-                                                           "generator." %
-                                                           reader.__name__)
-
-                    while True:
-                        yield next(generator)
-
-        else:
-            # When an object is instantiated we don't need to worry about the
-            # original position at every step, only at the end.
-            def wrapped_reader(fp, mode='U', mutate_fh=False, **kwargs):
-                file_keys = []
-                files = [fp]
-                for file_arg in file_args:
-                    if file_arg in kwargs:
-                        if kwargs[file_arg] is not None:
-                            file_keys.append(file_arg)
-                            files.append(kwargs[file_arg])
-                    else:
-                        kwargs[file_arg] = None
-
-                with open_files(files, mode) as fhs:
-                    for key, fh in zip(file_keys, fhs[1:]):
-                        kwargs[key] = fh
-                    return reader(fhs[0], **kwargs)
-
-        wrapped_reader.__doc__ = reader.__doc__
-        wrapped_reader.__name__ = reader.__name__
-
-        format_class['reader'] = wrapped_reader
-        return wrapped_reader
-    return decorator
-
-
-def register_writer(format, cls=None):
-    """Return a decorator for a writer function.
-
-    A decorator factory for writer functions.
-
-    A writer function should have at least the following signature:
-    ``<class_name_or_generator>_to_<format_name>(obj, fh)``. `fh` is **always**
-    an open filehandle. This decorator provides the ability to use filepaths in
-    the same argument position as `fh`. They will automatically be opened and
-    closed.
-
-    **The writer must not close the filehandle**, cleanup will be
-    handled external to the reader and is not its concern.
-
-    Any additional `**kwargs` will be passed to the writer and may be used if
-    necessary.
-
-    The writer must not return a value. Instead it should only mutate the `fh`
-    in a way consistent with it's purpose.
-
-    If the writer accepts a generator, it should exhaust the generator to
-    ensure that the potentially open filehandle backing said generator is
-    closed.
-
-    .. note:: Failure to adhere to the above interface specified for a writer
-       will result in unintended side-effects.
-
-    Parameters
-    ----------
-    format : str
-        A format name which a decorated writer will be bound to.
-    cls : type, optional
-        The class which a decorated writer will be bound to. If `cls` is None
-        the writer will be bound as expecting a generator.
-        Default is None.
-
-    Returns
-    -------
-    function
-        A decorator to be used on a writer. The decorator will raise a
-        ``skbio.io.DuplicateRegistrationError`` if there already exists a
-        *writer* bound to the same permutation of `fmt` and `cls`.
-
-    See Also
-    --------
-    skbio.io.write
-    skbio.io.get_writer
-
-    """
-    def decorator(writer):
-        format_class = _formats.setdefault(format, {}).setdefault(cls, {})
-
-        if 'writer' in format_class:
-            raise DuplicateRegistrationError('writer', format, cls)
-
-        file_args = []
-        writer_spec = inspect.getargspec(writer)
-        if writer_spec.defaults is not None:
-            # Concept from http://stackoverflow.com/a/12627202/579416
-            for key, default in zip(
-                    writer_spec.args[-len(writer_spec.defaults):],
-                    writer_spec.defaults):
-                if default is FileSentinel:
-                    file_args.append(key)
-
-        # We wrap the writer so that basic file handling can be managed
-        # externally from the business logic.
-        def wrapped_writer(obj, fp, mode='w', **kwargs):
-            file_keys = []
-            files = [fp]
-            for file_arg in file_args:
-                if file_arg in kwargs:
-                    if kwargs[file_arg] is not None:
-                        file_keys.append(file_arg)
-                        files.append(kwargs[file_arg])
-                else:
-                    kwargs[file_arg] = None
-
-            with open_files(files, mode) as fhs:
-                for key, fh in zip(file_keys, fhs[1:]):
-                    kwargs[key] = fh
-                writer(obj, fhs[0], **kwargs)
-
-        wrapped_writer.__doc__ = writer.__doc__
-        wrapped_writer.__name__ = writer.__name__
-
-        format_class['writer'] = wrapped_writer
-        return wrapped_writer
-    return decorator
-
-
-def list_read_formats(cls):
-    """Return a list of available read formats for a given `cls` type.
-
-    Parameters
-    ----------
-    cls : type
-        The class which will be used to determine what read formats exist for
-        an instance of `cls`.
-
-    Returns
-    -------
-    list
-        A list of available read formats for an instance of `cls`. List may be
-        empty.
-
-    See Also
-    --------
-    skbio.io.register_reader
-
-    """
-    return _rw_list_formats('reader', cls)
-
-
-def list_write_formats(cls):
-    """Return a list of available write formats for a given `cls` instance.
-
-    Parameters
-    ----------
-    cls : type
-        The class which will be used to determine what write formats exist for
-        an instance of `cls`.
-
-    Returns
-    -------
-    list
-        A list of available write formats for an instance of `cls`. List may be
-        empty.
-
-    See Also
-    --------
-    skbio.io.register_writer
-
-    """
-    return _rw_list_formats('writer', cls)
-
-
-def _rw_list_formats(name, cls):
-    formats = []
-    for fmt in _formats:
-        if cls in _formats[fmt] and name in _formats[fmt][cls]:
-            formats.append(fmt)
-    return formats
-
-
-def get_sniffer(format):
-    """Return a sniffer for a format.
-
-    Parameters
-    ----------
-    format : str
-        A format string which has a registered sniffer.
-
-    Returns
-    -------
-    function or None
-        Returns a sniffer function if one exists for the given `fmt`.
-        Otherwise it will return None.
-
-    See Also
-    --------
-    skbio.io.register_sniffer
-
-    """
-    return _sniffers.get(format, None)
-
-
-def get_reader(format, cls=None):
-    """Return a reader for a format.
-
-    Parameters
-    ----------
-    format : str
-        A registered format string.
-    cls : type, optional
-        The class which the reader will return an instance of. If `cls` is
-        None, the reader will return a generator.
-        Default is None.
-
-    Returns
-    -------
-    function or None
-        Returns a reader function if one exists for a given `fmt` and `cls`.
-        Otherwise it will return None.
-
-    See Also
-    --------
-    skbio.io.register_reader
-
-    """
-    return _rw_getter('reader', format, cls)
-
-
-def get_writer(format, cls=None):
-    """Return a writer for a format.
-
-    Parameters
-    ----------
-    format : str
-        A registered format string.
-    cls : type, optional
-        The class which the writer will expect an instance of. If `cls` is
-        None, the writer will expect a generator that is identical to what
-        is returned by ``get_reader(<some_format>, None)``.
-        Default is None.
-
-    Returns
-    -------
-    function or None
-        Returns a writer function if one exists for a given `fmt` and `cls`.
-        Otherwise it will return None.
-
-    See Also
-    --------
-    skbio.io.register_writer
-    skbio.io.get_reader
-
-    """
-    return _rw_getter('writer', format, cls)
-
-
-def _rw_getter(name, fmt, cls):
-    if fmt in _formats:
-        if cls in _formats[fmt] and name in _formats[fmt][cls]:
-                return _formats[fmt][cls][name]
-    return None
-
-
-def sniff(fp, cls=None, mode='U'):
-    """Attempt to guess the format of a file and return format str and kwargs.
-
-    Parameters
-    ----------
-    fp : filepath or filehandle
-        The provided file to guess the format of. Filepaths are automatically
-        closed; filehandles are the responsibility of the caller.
-    cls : type, optional
-        A provided class that restricts the search for the format. Only formats
-        which have a registered reader or writer for the given `cls` will be
-        tested.
-        Default is None.
-
-    Returns
-    -------
-    (str, kwargs)
-        A format name and kwargs for the corresponding reader.
-
-    Raises
-    ------
-    UnrecognizedFormatError
-        This occurs when the format is not 'claimed' by any registered sniffer
-        or when the format is ambiguous and has been 'claimed' by more than one
-        sniffer.
-
-    See Also
-    --------
-    skbio.io.register_sniffer
-
-    """
-    possibles = []
-    for fmt in _sniffers:
-        if cls is not None and fmt != _empty_file_format and (
-                fmt not in _formats or cls not in _formats[fmt]):
-            continue
-        format_sniffer = _sniffers[fmt]
-        is_format, fmt_kwargs = format_sniffer(fp, mode=mode)
-        if is_format:
-            possibles.append(fmt)
-            kwargs = fmt_kwargs
-
-    if not possibles:
-        raise UnrecognizedFormatError("Cannot guess the format for %s."
-                                      % str(fp))
-    if len(possibles) > 1:
-        raise UnrecognizedFormatError("File format is ambiguous, may be"
-                                      " one of %s." % str(possibles))
-    return possibles[0], kwargs
-
-
-def read(fp, format=None, into=None, verify=True, mode='U', **kwargs):
-    """Read a supported skbio file format into an instance or a generator.
-
-    This function is able to reference and execute all *registered* read
-    operations in skbio.
-
-    Parameters
-    ----------
-    fp : filepath or filehandle
-        The location to read the given `format` `into`. Filepaths are
-        automatically closed when read; filehandles are the responsibility
-        of the caller. In the case of a generator, a filepath will be closed
-        when ``StopIteration`` is raised; filehandles are still the
-        responsibility of the caller.
-    format : str, optional
-        The format must be a format name with a reader for the given
-        `into` class. If a `format` is not provided or is None, all
-        registered sniffers for the provied `into` class will be evaluated to
-        attempt to guess the format.
-        Default is None.
-    into : type, optional
-        A class which has a registered reader for a given `format`. If `into`
-        is not provided or is None, read will return a generator.
-        Default is None.
-    verify : bool, optional
-        Whether or not to confirm the format of a file if `format` is provided.
-        Will raise a ``skbio.io.FormatIdentificationWarning`` if the sniffer of
-        `format` returns False.
-        Default is True.
-    mode : str, optional
-        The read mode. This is passed to `open(fp, mode)` internally.
-        Default is 'U'
-    kwargs : dict, optional
-        Will be passed directly to the appropriate reader.
-
-    Returns
-    -------
-    object or generator
-        If `into` is not None, an instance of the `into` class will be
-        provided with internal state consistent with the provided file.
-        If `into` is None, a generator will be returned.
-
-    Raises
-    ------
-    ValueError
-        Raised when `format` and `into` are both None.
-    skbio.io.UnrecognizedFormatError
-        Raised when a reader could not be found for a given `format` or the
-        format could not be guessed.
-    skbio.io.FormatIdentificationWarning
-        Raised when `verify` is True and the sniffer of a `format` provided a
-        kwarg value that did not match the user's kwarg value.
-
-    See Also
-    --------
-    skbio.io.register_reader
-    skbio.io.register_sniffer
-
-    """
-    if format is None and into is None:
-        raise ValueError("`format` and `into` cannot both be None.")
-
-    if format is None:
-        format, fmt_kwargs = sniff(fp, cls=into, mode=mode)
-        kwargs = _override_kwargs(kwargs, fmt_kwargs, verify)
-    elif verify:
-        sniffer = get_sniffer(format)
-        if sniffer is not None:
-            is_format, fmt_kwargs = sniffer(fp)
-            if not is_format:
-                warn("%s could not be positively identified as %s file." %
-                     (str(fp), format),
-                     FormatIdentificationWarning)
-            else:
-                kwargs = _override_kwargs(kwargs, fmt_kwargs, True)
-
-    reader = get_reader(format, into)
-    if reader is None:
-        raise UnrecognizedFormatError("Cannot read %s into %s, no reader "
-                                      "found." % (format, into.__name__
-                                                  if into is not None
-                                                  else 'generator'))
-    return reader(fp, mode=mode, **kwargs)
-
-
-def write(obj, format, into, mode='w', **kwargs):
-    """Write a supported skbio file format from an instance or a generator.
-
-    This function is able to reference and execute all *registered* write
-    operations in skbio.
-
-    Parameters
-    ----------
-    obj : object
-        The object must have a registered writer for a provided `format`.
-    format : str
-        The format must be a registered format name with a writer for the given
-        `obj`.
-    into : filepath or filehandle
-        The location to write the given `format` from `obj` into. Filepaths are
-        automatically closed when written; filehandles are the responsibility
-        of the caller.
-    mode : str, optional
-        The write mode. This is passed to `open(fp, mode)` internally.
-        Default is 'w'.
-    kwargs : dict, optional
-        Will be passed directly to the appropriate writer.
-
-    Raises
-    ------
-    skbio.io.UnrecognizedFormatError
-        Raised when a writer could not be found for the given `format` and
-        `obj`.
-
-    See Also
-    --------
-    skbio.io.register_writer
-
-    """
-    cls = None
-    if not isinstance(obj, types.GeneratorType):
-        cls = obj.__class__
-    writer = get_writer(format, cls)
-    if writer is None:
-        raise UnrecognizedFormatError("Cannot write %s into %s, no %s writer "
-                                      "found." % (format, str(into),
-                                                  'generator' if cls is None
-                                                  else str(cls)))
-
-    writer(obj, into, mode=mode, **kwargs)
-
-
-# This is meant to be a handy indicator to the user that they have done
-# something wrong.
- at register_sniffer(_empty_file_format)
-def empty_file_sniffer(fh):
-    for line in fh:
-        if line.strip():
-            return False, {}
-    return True, {}
-
-
-def initialize_oop_interface():
-    classes = set()
-    # Find each potential class
-    for fmt in _formats:
-        for cls in _formats[fmt]:
-            classes.add(cls)
-    # Add readers and writers for each class
-    for cls in classes:
-        if cls is not None:
-            _apply_read(cls)
-            _apply_write(cls)
-
-
-def _apply_read(cls):
-    """Add read method if any formats have a registered reader for `cls`."""
-    skbio_io_read = globals()['read']
-    read_formats = list_read_formats(cls)
-    if read_formats:
-        @classmethod
-        def read(cls, fp, format=None, **kwargs):
-            return skbio_io_read(fp, into=cls, format=format, **kwargs)
-
-        read.__func__.__doc__ = _read_docstring % (
-            cls.__name__,
-            _formats_for_docs(read_formats),
-            cls.__name__,
-            cls.__name__,
-            cls.__name__,
-            _import_paths(read_formats)
-        )
-        cls.read = read
-
-
-def _apply_write(cls):
-    """Add write method if any formats have a registered writer for `cls`."""
-    skbio_io_write = globals()['write']
-    write_formats = list_write_formats(cls)
-    if write_formats:
-        if not hasattr(cls, 'default_write_format'):
-            raise NotImplementedError(
-                "Classes with registered writers must provide a "
-                "`default_write_format`. Please add `default_write_format` to"
-                " '%s'." % cls.__name__)
-
-        def write(self, fp, format=cls.default_write_format, **kwargs):
-            skbio_io_write(self, into=fp, format=format, **kwargs)
-
-        write.__doc__ = _write_docstring % (
-            cls.__name__,
-            _formats_for_docs(write_formats),
-            cls.__name__,
-            cls.default_write_format,
-            _import_paths(write_formats)
-        )
-        cls.write = write
-
-
-def _import_paths(formats):
-    lines = []
-    for fmt in formats:
-        lines.append("skbio.io." + fmt)
-    return '\n'.join(lines)
-
-
-def _formats_for_docs(formats):
-    lines = []
-    for fmt in formats:
-        lines.append("- ``'%s'`` (:mod:`skbio.io.%s`)" % (fmt, fmt))
-    return '\n'.join(lines)
-
-
-_read_docstring = """Create a new ``%s`` instance from a file.
-
-This is a convenience method for :mod:`skbio.io.read`. For more
-information about the I/O system in scikit-bio, please see
-:mod:`skbio.io`.
-
-Supported file formats include:
-
-%s
-
-Parameters
-----------
-fp : filepath or filehandle
-    The location to read the given `format`. Filepaths are
-    automatically closed when read; filehandles are the
-    responsibility of the caller.
-format : str, optional
-    The format must be a format name with a reader for ``%s``.
-    If a `format` is not provided or is None, it will attempt to
-    guess the format.
-kwargs : dict, optional
-    Keyword arguments passed to :mod:`skbio.io.read` and the file
-    format reader for ``%s``.
-
-Returns
--------
-%s
-    A new instance.
-
-See Also
---------
-write
-skbio.io.read
-%s
-
-"""
-
-_write_docstring = """Write an instance of ``%s`` to a file.
-
-This is a convenience method for :mod:`skbio.io.write`. For more
-information about the I/O system in scikit-bio, please see
-:mod:`skbio.io`.
-
-Supported file formats include:
-
-%s
-
-Parameters
-----------
-fp : filepath or filehandle
-    The location to write the given `format` into. Filepaths are
-    automatically closed when written; filehandles are the
-    responsibility of the caller.
-format : str
-    The format must be a registered format name with a writer for
-    ``%s``.
-    Default is `'%s'`.
-kwargs : dict, optional
-    Keyword arguments passed to :mod:`skbio.io.write` and the
-    file format writer.
-
-See Also
---------
-read
-skbio.io.write
-%s
-
-"""
diff --git a/skbio/io/_warning.py b/skbio/io/_warning.py
index b049420..c7e44ef 100644
--- a/skbio/io/_warning.py
+++ b/skbio/io/_warning.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -8,6 +6,8 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 
 class FormatIdentificationWarning(Warning):
     """Warn when the sniffer of a format cannot confirm the format."""
diff --git a/skbio/parse/__init__.py b/skbio/io/format/__init__.py
similarity index 72%
rename from skbio/parse/__init__.py
rename to skbio/io/format/__init__.py
index 18cec47..f85db28 100644
--- a/skbio/parse/__init__.py
+++ b/skbio/io/format/__init__.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -8,5 +6,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from numpy.testing import Tester
-test = Tester().test
+from __future__ import absolute_import, division, print_function
+
+from skbio.util import TestRunner
+
+test = TestRunner(__file__).test
diff --git a/skbio/io/_base.py b/skbio/io/format/_base.py
similarity index 76%
rename from skbio/io/_base.py
rename to skbio/io/format/_base.py
index 23776ab..e031c9b 100644
--- a/skbio/io/_base.py
+++ b/skbio/io/format/_base.py
@@ -6,31 +6,21 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
 from future.builtins import range
 
 import re
 import warnings
 
+import numpy as np
+
 from skbio.util import cardinal_to_ordinal
 
 _whitespace_regex = re.compile(r'\s')
 _newline_regex = re.compile(r'\n')
 
 
-def _chunk_str(s, n, char):
-    """Insert `char` character every `n` characters in string `s`.
-
-    Canonically pronounced "chunkster".
-
-    """
-    # Modified from http://stackoverflow.com/a/312464/3776794
-    if n < 1:
-        raise ValueError(
-            "Cannot split string into chunks with n=%d. n must be >= 1." % n)
-    return char.join((s[i:i+n] for i in range(0, len(s), n)))
-
-
 def _decode_qual_to_phred(qual_str, variant=None, phred_offset=None):
     phred_offset, phred_range = _get_phred_offset_and_range(
         variant, phred_offset,
@@ -41,16 +31,13 @@ def _decode_qual_to_phred(qual_str, variant=None, phred_offset=None):
          "scikit-bio. Please see the following scikit-bio issue to "
          "track progress on this:\n\t"
          "https://github.com/biocore/scikit-bio/issues/719"])
+    qual = np.fromstring(qual_str, dtype=np.uint8) - phred_offset
 
-    phred = []
-    for c in qual_str:
-        score = ord(c) - phred_offset
-        if phred_range[0] <= score <= phred_range[1]:
-            phred.append(score)
-        else:
-            raise ValueError("Decoded Phred score %d is out of range [%d, %d]."
-                             % (score, phred_range[0], phred_range[1]))
-    return phred
+    if np.any((qual > phred_range[1]) | (qual < phred_range[0])):
+        raise ValueError("Decoded Phred score is out of range [%d, %d]."
+                         % (phred_range[0], phred_range[1]))
+
+    return qual
 
 
 def _encode_phred_to_qual(phred, variant=None, phred_offset=None):
@@ -150,7 +137,8 @@ def _parse_fasta_like_header(line):
 
 
 def _format_fasta_like_records(generator, id_whitespace_replacement,
-                               description_newline_replacement, require_qual):
+                               description_newline_replacement, require_qual,
+                               lowercase=None):
     if ((id_whitespace_replacement is not None and
          '\n' in id_whitespace_replacement) or
         (description_newline_replacement is not None and
@@ -160,17 +148,26 @@ def _format_fasta_like_records(generator, id_whitespace_replacement,
             "sequence IDs, nor to replace newlines in sequence descriptions.")
 
     for idx, seq in enumerate(generator):
+
         if len(seq) < 1:
             raise ValueError(
                 "%s sequence does not contain any characters (i.e., it is an "
                 "empty/blank sequence). Writing empty sequences is not "
                 "supported." % cardinal_to_ordinal(idx + 1))
 
-        id_ = seq.id
+        if 'id' in seq.metadata:
+            id_ = '%s' % seq.metadata['id']
+        else:
+            id_ = ''
+
         if id_whitespace_replacement is not None:
             id_ = _whitespace_regex.sub(id_whitespace_replacement, id_)
 
-        desc = seq.description
+        if 'description' in seq.metadata:
+            desc = seq.metadata['description']
+        else:
+            desc = ''
+
         if description_newline_replacement is not None:
             desc = _newline_regex.sub(description_newline_replacement, desc)
 
@@ -179,9 +176,44 @@ def _format_fasta_like_records(generator, id_whitespace_replacement,
         else:
             header = id_
 
-        if require_qual and not seq.has_quality():
+        if require_qual and 'quality' not in seq.positional_metadata:
             raise ValueError(
                 "Cannot write %s sequence because it does not have quality "
                 "scores associated with it." % cardinal_to_ordinal(idx + 1))
 
-        yield header, seq.sequence, seq.quality
+        qual = None
+        if 'quality' in seq.positional_metadata:
+            qual = seq.positional_metadata['quality'].values
+
+        if lowercase is not None:
+            if hasattr(seq, 'lowercase'):
+                seq_str = seq.lowercase(lowercase)
+            else:
+                raise AttributeError("lowercase specified but class %s does "
+                                     "not support lowercase functionality" %
+                                     seq.__class__.__name__)
+        else:
+            seq_str = str(seq)
+        yield header, "%s" % seq_str, qual
+
+
+def _line_generator(fh, skip_blanks=False):
+    for line in fh:
+        line = line.strip()
+        if line or not skip_blanks:
+            yield line
+
+
+def _too_many_blanks(fh, max_blanks):
+    count = 0
+    too_many = False
+    for line in _line_generator(fh, skip_blanks=False):
+        if line:
+            break
+        else:
+            count += 1
+            if count > max_blanks:
+                too_many = True
+                break
+    fh.seek(0)
+    return too_many
diff --git a/skbio/io/clustal.py b/skbio/io/format/clustal.py
similarity index 79%
rename from skbio/io/clustal.py
rename to skbio/io/format/clustal.py
index c29fa98..f6253dd 100644
--- a/skbio/io/clustal.py
+++ b/skbio/io/format/clustal.py
@@ -1,8 +1,8 @@
 r"""
-Clustal format (:mod:`skbio.io.clustal`)
-========================================
+Clustal format (:mod:`skbio.io.format.clustal`)
+===============================================
 
-.. currentmodule:: skbio.io.clustal
+.. currentmodule:: skbio.io.format.clustal
 
 Clustal format (``clustal``) stores multiple sequence alignments. This format
 was originally introduced in the Clustal package [1]_.
@@ -41,10 +41,8 @@ subsequences (not included in the examples below).
    :mod:`skbio.sequence`. The specific lexicon that is validated against
    depends on the type of sequences stored in the alignment.
 
-
 Examples
 --------
-
 Assume we have a clustal-formatted file with the following contents::
 
     CLUSTAL W (1.82) multiple sequence alignment
@@ -57,39 +55,32 @@ Assume we have a clustal-formatted file with the following contents::
     def   ---------------CGCGAUGCAUGCAU-CGAU
     xyz   -----------CAUGCAUCGUACGUACGCAUGAC
 
-We can use the following code to read a clustal file:
+We can use the following code to read a clustal file into an ``Alignment``:
 
->>> from StringIO import StringIO
->>> from skbio import read
 >>> from skbio import Alignment
->>> clustal_f = StringIO('abc   GCAUGCAUCUGCAUACGUACGUACGCAUGCA\n'
-...                      'def   -------------------------------\n'
-...                      'xyz   -------------------------------\n'
-...                      '\n'
-...                      'abc   GUCGAUACAUACGUACGUCGGUACGU-CGAC\n'
-...                      'def   ---------------CGUGCAUGCAU-CGAU\n'
-...                      'xyz   -----------CAUUCGUACGUACGCAUGAC\n')
->>> for dna in read(clustal_f, format="clustal", into=Alignment):
-...     print(dna.id)
-...     print(dna.sequence)
-abc
-GCAUGCAUCUGCAUACGUACGUACGCAUGCAGUCGAUACAUACGUACGUCGGUACGU-CGAC
-def
-----------------------------------------------CGUGCAUGCAU-CGAU
-xyz
-------------------------------------------CAUUCGUACGUACGCAUGAC
-
-We can use the following code to write to a clustal-formatted file:
-
->>> from skbio import Alignment, DNA
->>> from skbio.io import write
->>> seqs = [DNA('ACCGTTGTA-GTAGCT', id='seq1'),
-...         DNA('A--GTCGAA-GTACCT', id='sequence-2'),
-...         DNA('AGAGTTGAAGGTATCT', id='3')]
+>>> clustal_f = [u'CLUSTAL W (1.82) multiple sequence alignment\n',
+...              u'\n',
+...              u'abc   GCAUGCAUCUGCAUACGUACGUACGCAUGCA\n',
+...              u'def   -------------------------------\n',
+...              u'xyz   -------------------------------\n',
+...              u'\n',
+...              u'abc   GUCGAUACAUACGUACGUCGGUACGU-CGAC\n',
+...              u'def   ---------------CGUGCAUGCAU-CGAU\n',
+...              u'xyz   -----------CAUUCGUACGUACGCAUGAC\n']
+>>> Alignment.read(clustal_f, format="clustal")
+<Alignment: n=3; mean +/- std length=62.00 +/- 0.00>
+
+We can use the following code to write an ``Alignment`` to a clustal-formatted
+file:
+
+>>> from io import StringIO
+>>> from skbio import DNA
+>>> seqs = [DNA('ACCGTTGTA-GTAGCT', metadata={'id': 'seq1'}),
+...         DNA('A--GTCGAA-GTACCT', metadata={'id': 'sequence-2'}),
+...         DNA('AGAGTTGAAGGTATCT', metadata={'id': '3'})]
 >>> aln = Alignment(seqs)
->>> from StringIO import StringIO
 >>> fh = StringIO()
->>> aln.write(fh, format='clustal')
+>>> _ = aln.write(fh, format='clustal')
 >>> print(fh.getvalue()) # doctest: +NORMALIZE_WHITESPACE
 CLUSTAL
 <BLANKLINE>
@@ -104,26 +95,29 @@ References
 ----------
 .. [1] http://www.sciencedirect.com/science/article/pii/0378111988903307
 .. [2] http://web.mit.edu/meme_v4.9.0/doc/clustalw-format.html
-"""
 
+"""
 
-# -----------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
 # Distributed under the terms of the Modified BSD License.
 #
 # The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-from __future__ import absolute_import, division, print_function
+# ----------------------------------------------------------------------------
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
 
-from skbio.parse.record import DelimitedSplitter
-from skbio.io import (register_reader, register_writer, register_sniffer,
-                      ClustalFormatError)
-from skbio.sequence import BiologicalSequence
+from skbio.io import create_format, ClustalFormatError
+from skbio.sequence import Sequence
 from skbio.alignment import Alignment
 
 
-def _label_line_parser(record, splitter, strict=True):
+clustal = create_format('clustal')
+
+
+def _label_line_parser(record, strict=True):
     """Returns dict mapping list of data to labels, plus list with field order.
 
     Field order contains labels in order encountered in file.
@@ -135,14 +129,15 @@ def _label_line_parser(record, splitter, strict=True):
     labels = []
     result = {}
     for line in record:
-        try:
-            key, val = splitter(line.rstrip())
-        except:
+        split_line = line.strip().rsplit(None, 1)
 
+        if len(split_line) == 2:
+            key, val = split_line
+        else:
             if strict:
                 raise ClustalFormatError(
-                    "Failed to extract key and value from line %s" %
-                    line)
+                    "Failed to parse sequence identifier and subsequence from "
+                    "the following line: %r" % line)
             else:
                 continue  # just skip the line if not strict
 
@@ -162,8 +157,6 @@ def _is_clustal_seq_line(line):
     return line and (not line[0].isspace()) and\
         (not line.startswith('CLUSTAL')) and (not line.startswith('MUSCLE'))
 
-last_space = DelimitedSplitter(None, -1)
-
 
 def _delete_trailing_number(line):
     """Deletes trailing number from a line.
@@ -218,7 +211,7 @@ def _check_length(data, labels, num_seqs_check=None):
     return True
 
 
- at register_sniffer("clustal")
+ at clustal.sniffer()
 def _clustal_sniffer(fh):
     # Strategy
     #   The following conditions preclude a file from being clustal
@@ -228,10 +221,13 @@ def _clustal_sniffer(fh):
     #       * One of the sequence ids is not immediately
     #         followed by a subsequence
     empty = True
+    if fh.read(7) != 'CLUSTAL':
+        return False, {}
+    fh.seek(0)
     try:
         records = map(_delete_trailing_number,
                       filter(_is_clustal_seq_line, fh))
-        data, labels = _label_line_parser(records, last_space, strict=True)
+        data, labels = _label_line_parser(records, strict=True)
         if len(data) > 0:
             empty = False
         # Only check first 50 sequences
@@ -243,19 +239,19 @@ def _clustal_sniffer(fh):
     return not empty, {}
 
 
- at register_writer('clustal', Alignment)
+ at clustal.writer(Alignment)
 def _alignment_to_clustal(obj, fh):
     r"""writes aligned sequences to a specified file
     Parameters
     ----------
     obj: Alignment object
-        An alignment object containing a set of BiologicalSequence objects
+        An alignment object containing a set of Sequence objects
     fh: open file handle object
         An open file handle object containing Clustal sequences.
 
     """
     clen = 60  # Max length of clustal lines
-    names, seqs = zip(*[(s.id, s.sequence) for s in obj])
+    names, seqs = zip(*[(s.metadata['id'], str(s)) for s in obj])
     nameLen = max(map(len, names))
     seqLen = max(map(len, seqs))
     fh.write('CLUSTAL\n\n\n')
@@ -266,7 +262,7 @@ def _alignment_to_clustal(obj, fh):
         fh.write("\n")
 
 
- at register_reader('clustal', Alignment)
+ at clustal.reader(Alignment)
 def _clustal_to_alignment(fh, strict=True):
     r"""yields labels and sequences from msa (multiple sequence alignment)
 
@@ -317,12 +313,13 @@ def _clustal_to_alignment(fh, strict=True):
 
     records = map(_delete_trailing_number,
                   filter(_is_clustal_seq_line, fh))
-    data, labels = _label_line_parser(records, last_space, strict)
+    data, labels = _label_line_parser(records, strict)
 
     aligned_correctly = _check_length(data, labels)
     if not aligned_correctly:
         raise ClustalFormatError("Sequences not aligned properly")
     alns = []
     for key in labels:
-        alns.append(BiologicalSequence(id=key, sequence=''.join(data[key])))
+        alns.append(Sequence(sequence=''.join(data[key]),
+                             metadata={'id': key}))
     return Alignment(alns)
diff --git a/skbio/io/format/emptyfile.py b/skbio/io/format/emptyfile.py
new file mode 100644
index 0000000..49692e9
--- /dev/null
+++ b/skbio/io/format/emptyfile.py
@@ -0,0 +1,43 @@
+r"""
+Empty Files (:mod:`skbio.io.format.emptyfile`)
+==============================================
+
+.. currentmodule:: skbio.io.format.emptyfile
+
+This format exists to make debugging simpler; often an empty file is a mistake
+which can take an embarrassing amount of time to notice. This format has only
+a sniffer and no readers or writers, so error messages will indicate as such
+if an empty file is accidentally used as input.
+
+Format Support
+--------------
+**Has Sniffer: Yes**
+
+Format Specification
+--------------------
+An empty file consists of only whitespace characters.
+
+"""
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+from skbio.io import create_format
+
+emptyfile = create_format('<emptyfile>')
+
+
+ at emptyfile.sniffer()
+def _empty_file_sniffer(fh):
+    for line in fh:
+        if line.strip():
+            return False, {}
+    return True, {}
diff --git a/skbio/io/fasta.py b/skbio/io/format/fasta.py
similarity index 60%
rename from skbio/io/fasta.py
rename to skbio/io/format/fasta.py
index ea84ad3..070104b 100644
--- a/skbio/io/fasta.py
+++ b/skbio/io/format/fasta.py
@@ -1,8 +1,8 @@
 """
-FASTA/QUAL format (:mod:`skbio.io.fasta`)
-=========================================
+FASTA/QUAL format (:mod:`skbio.io.format.fasta`)
+================================================
 
-.. currentmodule:: skbio.io.fasta
+.. currentmodule:: skbio.io.format.fasta
 
 The FASTA file format (``fasta``) stores biological (i.e., nucleotide or
 protein) sequences in a simple plain text format that is both human-readable
@@ -18,7 +18,7 @@ An example of a FASTA-formatted file containing two DNA sequences::
     CATCGATCGATCGATGCATGCATGCATG
 
 The QUAL file format is an additional format related to FASTA. A FASTA file is
-sometimes accompanied by a QUAL file, particuarly when the fasta file contains
+sometimes accompanied by a QUAL file, particularly when the FASTA file contains
 sequences generated on a high-throughput sequencing instrument. QUAL files
 store a Phred quality score (nonnegative integer) for each base in a sequence
 stored in FASTA format (see [4]_ for more details). scikit-bio supports reading
@@ -31,21 +31,19 @@ Format Support
 +------+------+---------------------------------------------------------------+
 |Reader|Writer|                          Object Class                         |
 +======+======+===============================================================+
-|Yes   |Yes   |generator of :mod:`skbio.sequence.BiologicalSequence` objects  |
+|Yes   |Yes   |generator of :mod:`skbio.sequence.Sequence` objects            |
 +------+------+---------------------------------------------------------------+
 |Yes   |Yes   |:mod:`skbio.alignment.SequenceCollection`                      |
 +------+------+---------------------------------------------------------------+
 |Yes   |Yes   |:mod:`skbio.alignment.Alignment`                               |
 +------+------+---------------------------------------------------------------+
-|Yes   |Yes   |:mod:`skbio.sequence.BiologicalSequence`                       |
+|Yes   |Yes   |:mod:`skbio.sequence.Sequence`                                 |
 +------+------+---------------------------------------------------------------+
-|Yes   |Yes   |:mod:`skbio.sequence.NucleotideSequence`                       |
+|Yes   |Yes   |:mod:`skbio.sequence.DNA`                                      |
 +------+------+---------------------------------------------------------------+
-|Yes   |Yes   |:mod:`skbio.sequence.DNASequence`                              |
+|Yes   |Yes   |:mod:`skbio.sequence.RNA`                                      |
 +------+------+---------------------------------------------------------------+
-|Yes   |Yes   |:mod:`skbio.sequence.RNASequence`                              |
-+------+------+---------------------------------------------------------------+
-|Yes   |Yes   |:mod:`skbio.sequence.ProteinSequence`                          |
+|Yes   |Yes   |:mod:`skbio.sequence.Protein`                                  |
 +------+------+---------------------------------------------------------------+
 
 .. note:: All readers and writers support an optional QUAL file via the
@@ -62,8 +60,15 @@ A FASTA file contains one or more biological sequences. The sequences are
 stored sequentially, with a *record* for each sequence (also referred to as a
 *FASTA record*). Each *record* consists of a single-line *header* (sometimes
 referred to as a *defline*, *label*, *description*, or *comment*) followed by
-the sequence data, optionally split over multiple lines. Blank or
-whitespace-only lines are not allowed anywhere in the FASTA file.
+the sequence data, optionally split over multiple lines.
+
+.. note:: Blank or whitespace-only lines are only allowed at the beginning of
+   the file, between FASTA records, or at the end of the file. A blank or
+   whitespace-only line after the header line, within the sequence (for FASTA
+   files), or within quality scores (for QUAL files) will raise an error.
+
+   scikit-bio will ignore leading and trailing whitespace characters on each
+   line while reading.
 
 .. note:: scikit-bio does not currently support legacy FASTA format (i.e.,
    headers/comments denoted with a semicolon). The format supported by
@@ -79,20 +84,22 @@ Sequence Header
 ~~~~~~~~~~~~~~~
 Each sequence header consists of a single line beginning with a greater-than
 (``>``) symbol. Immediately following this is a sequence identifier (ID) and
-description separated by one or more whitespace characters. Both sequence ID
-and description are optional and are represented as the empty string (``''``)
-in scikit-bio's objects if they are not present in the header.
+description separated by one or more whitespace characters. The sequence ID and
+description are stored in the sequence `metadata` attribute, under the `'id'`
+and `'description'` keys, repectively. Both are optional. Each will be
+represented as the empty string (``''``) in `metadata` if it is not present
+in the header.
 
 A sequence ID consists of a single *word*: all characters after the greater-
 than symbol and before the first whitespace character (if any) are taken as the
 sequence ID. Unique sequence IDs are not strictly enforced by the FASTA format
-itself. A single standardized ID format is similarly not enforced by FASTA
+itself. A single standardized ID format is similarly not enforced by the FASTA
 format, though it is often common to use a unique library accession number for
 a sequence ID (e.g., NCBI's FASTA defline format [5]_).
 
 .. note:: scikit-bio will enforce sequence ID uniqueness depending on the type
    of object that the FASTA file is read into. For example, reading a FASTA
-   file as a generator of ``BiologicalSequence`` objects will not enforce
+   file as a generator of ``Sequence`` objects will not enforce
    unique IDs since it simply yields each sequence it finds in the FASTA file.
    However, if the FASTA file is read into a ``SequenceCollection`` object, ID
    uniqueness will be enforced because that is a requirement of a
@@ -121,25 +128,22 @@ Biological sequence data follows the header, and can be split over multiple
 lines. The sequence data (i.e., nucleotides or amino acids) are stored using
 the standard IUPAC lexicon (single-letter codes).
 
-.. note:: scikit-bio supports both upper and lower case characters. Both ``-``
-   and ``.`` are supported as gap characters. See :mod:`skbio.sequence` for
-   more details on how scikit-bio interprets sequence data in its in-memory
-   objects.
-
-   scikit-bio will remove leading and trailing whitespace from each line of
-   sequence data before joining the sequence chunks into a single sequence.
-   Whitespace characters are **not** removed from the middle of the sequence
-   chunks. Likewise, other invalid IUPAC characters are **not** removed from
-   the sequence data as it is read. Thus, it is possible to create an invalid
-   in-memory sequence object (see warning below).
-
-.. warning:: In an effort to maintain reasonable performance while reading
-   FASTA files (which can be quite large), validation of sequence data is
-   **not** performed during reading. It is the responsibility of the user to
-   validate their in-memory representation of the data if desired (e.g., by
-   calling ``is_valid`` on the returned object). Thus, it is possible to read
-   invalid characters into objects (e.g. whitespace occurring in the middle of
-   a sequence, or invalid IUPAC DNA characters in a DNA sequence).
+.. note:: scikit-bio supports both upper and lower case characters.
+   This functionality depends on the type of object the data is
+   being read into. For ``Sequence``
+   objects, scikit-bio doesn't care about the case. However, for other object
+   types, such as :class:`skbio.sequence.DNA`, :class:`skbio.sequence.RNA`,
+   and :class:`skbio.sequence.Protein`, the `lowercase` parameter
+   must be used to control case functionality. Refer to the documentation for
+   the constructors for details.
+.. note:: Both ``-`` and ``.`` are supported as gap characters. See
+   :mod:`skbio.sequence` for more details on how scikit-bio interprets
+   sequence data in its in-memory objects.
+
+   Validation is performed for all scikit-bio objects which support it. This
+   consists of all objects which enforce usage of IUPAC characters. If any
+   invalid IUPAC characters are found in the sequence while reading from the
+   FASTA file, an exception is raised.
 
 QUAL Format
 ^^^^^^^^^^^
@@ -150,6 +154,9 @@ containing a sequence ID and description. The same rules apply to QUAL headers
 as FASTA headers (see the above sections for details). scikit-bio processes
 FASTA and QUAL headers in exactly the same way.
 
+Quality scores are automatically stored in the object's `positional_metadata`
+attribute, under the `'quality'` column.
+
 Instead of storing biological sequence data in each record, a QUAL file stores
 a Phred quality score for each base in the corresponding sequence. Quality
 scores are represented as nonnegative integers separated by whitespace
@@ -177,7 +184,7 @@ QUAL File Parameter (Readers and Writers)
 The ``qual`` parameter is available to all FASTA format readers and writers. It
 can be any file-like type supported by scikit-bio's I/O registry (e.g., file
 handle, file path, etc.). If ``qual`` is provided when reading, quality scores
-will be included in each in-memory ``BiologicalSequence`` object, in addition
+will be included in each in-memory ``Sequence`` object, in addition
 to sequence data stored in the FASTA file. When writing, quality scores will be
 written in QUAL format in addition to the sequence data being written in FASTA
 format.
@@ -188,26 +195,25 @@ The available reader parameters differ depending on which reader is used.
 
 Generator, SequenceCollection, and Alignment Reader Parameters
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The ``constructor`` parameter can be used with the ``BiologicalSequence``
+The ``constructor`` parameter can be used with the ``Sequence``
 generator, ``SequenceCollection``, and ``Alignment`` FASTA readers.
 ``constructor`` specifies the in-memory type of each sequence that is parsed,
-and defaults to ``BiologicalSequence``. ``constructor`` should be a subclass of
-``BiologicalSequence``. For example, if you know that the FASTA file you're
+and defaults to ``Sequence``. ``constructor`` should be a subclass of
+``Sequence``. For example, if you know that the FASTA file you're
 reading contains protein sequences, you would pass
-``constructor=ProteinSequence`` to the reader call.
+``constructor=Protein`` to the reader call.
 
 .. note:: The FASTA sniffer will not attempt to guess the ``constructor``
-   parameter, so it will always default to ``BiologicalSequence`` if another
+   parameter, so it will always default to ``Sequence`` if another
    type is not provided to the reader.
 
-BiologicalSequence Reader Parameters
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The ``seq_num`` parameter can be used with the ``BiologicalSequence``,
-``NucleotideSequence``, ``DNASequence``, ``RNASequence``, and
-``ProteinSequence`` FASTA readers. ``seq_num`` specifies which sequence to read
-from the FASTA file (and optional QUAL file), and defaults to 1 (i.e., such
-that the first sequence is read). For example, to read the 50th sequence from a
-FASTA file, you would pass ``seq_num=50`` to the reader call.
+Sequence Reader Parameters
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+The ``seq_num`` parameter can be used with the ``Sequence``,
+``DNA``, ``RNA``, and ``Protein`` FASTA readers. ``seq_num`` specifies which
+sequence to read from the FASTA file (and optional QUAL file), and defaults to
+1 (i.e., such that the first sequence is read). For example, to read the 50th
+sequence from a FASTA file, you would pass ``seq_num=50`` to the reader call.
 
 Writer-specific Parameters
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -246,6 +252,12 @@ The following parameters are available to all FASTA format writers:
   behavior is to not split sequence data or quality scores across multiple
   lines.
 
+- ``lowercase``: String or boolean array. If a string, it is treated as a key
+  into the positional metadata of the object. If a boolean array, it
+  indicates characters to write in lowercase. Characters in the sequence
+  corresponding to `True` values will be written in lowercase. The boolean
+  array must be the same length as the sequence.
+
 .. note:: The FASTA format writers will have noticeably better runtime
    performance if ``id_whitespace_replacement`` and/or
    ``description_newline_replacement`` are set to ``None`` so that whitespace
@@ -296,32 +308,29 @@ Let's define this file in-memory as a ``StringIO``, though this could be a real
 file path, file handle, or anything that's supported by scikit-bio's I/O
 registry in practice:
 
->>> from StringIO import StringIO
->>> fs = (
-...     ">seq1 Turkey\\n"
-...     "AAGCTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGGTAT\\n"
-...     ">seq2 Salmo gair\\n"
-...     "AAGCCTTGGCAGTGCAGGGTGAGCCGTGG\\n"
-...     "CCGGGCACGGTAT\\n"
-...     ">seq3 H. Sapiens\\n"
-...     "ACCGGTTGGCCGTTCAGGGTACAGGTTGGCCGTTCAGGGTAA\\n"
-...     ">seq4 Chimp\\n"
-...     "AAACCCTTGCCG\\n"
-...     "TTACGCTTAAAC\\n"
-...     "CGAGGCCGGGAC\\n"
-...     "ACTCAT\\n"
-...     ">seq5 Gorilla\\n"
-...     "AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA\\n")
->>> fh = StringIO(fs)
+>>> fl = [u">seq1 Turkey\\n",
+...       u"AAGCTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGGTAT\\n",
+...       u">seq2 Salmo gair\\n",
+...       u"AAGCCTTGGCAGTGCAGGGTGAGCCGTGG\\n",
+...       u"CCGGGCACGGTAT\\n",
+...       u">seq3 H. Sapiens\\n",
+...       u"ACCGGTTGGCCGTTCAGGGTACAGGTTGGCCGTTCAGGGTAA\\n",
+...       u">seq4 Chimp\\n",
+...       u"AAACCCTTGCCG\\n",
+...       u"TTACGCTTAAAC\\n",
+...       u"CGAGGCCGGGAC\\n",
+...       u"ACTCAT\\n",
+...       u">seq5 Gorilla\\n",
+...       u"AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA\\n"]
 
 Let's read the FASTA file into a ``SequenceCollection``:
 
 >>> from skbio import SequenceCollection
->>> sc = SequenceCollection.read(fh)
+>>> sc = SequenceCollection.read(fl)
 >>> sc.sequence_lengths()
 [42, 42, 42, 42, 42]
 >>> sc.ids()
-['seq1', 'seq2', 'seq3', 'seq4', 'seq5']
+[u'seq1', u'seq2', u'seq3', u'seq4', u'seq5']
 
 We see that all 5 sequences have 42 characters, and that each of the sequence
 IDs were successfully read into memory.
@@ -331,8 +340,7 @@ aligned), let's load the FASTA file into an ``Alignment`` object, which is a
 more appropriate data structure:
 
 >>> from skbio import Alignment
->>> fh = StringIO(fs) # reload the StringIO to read from the beginning again
->>> aln = Alignment.read(fh)
+>>> aln = Alignment.read(fl)
 >>> aln.sequence_length()
 42
 
@@ -345,30 +353,44 @@ the correct file format for us!
 Let's inspect the type of sequences stored in the ``Alignment``:
 
 >>> aln[0]
-<BiologicalSequence: AAGCTNGGGC... (length: 42)>
-
-By default, sequences are loaded as ``BiologicalSequence`` objects. We can
+Sequence
+------------------------------------------------
+Metadata:
+    u'description': u'Turkey'
+    u'id': u'seq1'
+Stats:
+    length: 42
+------------------------------------------------
+0 AAGCTNGGGC ATTTCAGGGT GAGCCCGGGC AATACAGGGT AT
+
+By default, sequences are loaded as ``Sequence`` objects. We can
 change the type of sequence via the ``constructor`` parameter:
 
->>> from skbio import DNASequence
->>> fh = StringIO(fs) # reload the StringIO to read from the beginning again
->>> aln = Alignment.read(fh, constructor=DNASequence)
->>> aln[0]
-<DNASequence: AAGCTNGGGC... (length: 42)>
-
-We now have an ``Alignment`` of ``DNASequence`` objects instead of
-``BiologicalSequence`` objects. Validation of sequence character data is not
-performed during reading (see warning above for details). To verify that each
-of the sequences are valid DNA sequences:
-
->>> aln.is_valid()
-True
+>>> from skbio import DNA
+>>> aln = Alignment.read(fl, constructor=DNA)
+>>> aln[0] # doctest: +NORMALIZE_WHITESPACE
+DNA
+------------------------------------------------
+Metadata:
+    u'description': u'Turkey'
+    u'id': u'seq1'
+Stats:
+    length: 42
+    has gaps: False
+    has degenerates: True
+    has non-degenerates: True
+    GC-content: 54.76%
+------------------------------------------------
+0 AAGCTNGGGC ATTTCAGGGT GAGCCCGGGC AATACAGGGT AT
+
+We now have an ``Alignment`` of ``DNA`` objects instead of
+``Sequence`` objects.
 
 To write the alignment in FASTA format:
 
->>> new_fh = StringIO()
->>> aln.write(new_fh)
->>> print(new_fh.getvalue())
+>>> from io import StringIO
+>>> with StringIO() as fh:
+...     print(aln.write(fh).getvalue())
 >seq1 Turkey
 AAGCTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGGTAT
 >seq2 Salmo gair
@@ -380,58 +402,127 @@ AAACCCTTGCCGTTACGCTTAAACCGAGGCCGGGACACTCAT
 >seq5 Gorilla
 AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA
 <BLANKLINE>
->>> new_fh.close()
 
 Both ``SequenceCollection`` and ``Alignment`` load all of the sequences from
 the FASTA file into memory at once. If the FASTA file is large (which is often
 the case), this may be infeasible if you don't have enough memory. To work
 around this issue, you can stream the sequences using scikit-bio's
 generator-based FASTA reader and writer. The generator-based reader yields
-``BiologicalSequence`` objects (or subclasses if ``constructor`` is supplied)
+``Sequence`` objects (or subclasses if ``constructor`` is supplied)
 one at a time, instead of loading all sequences into memory. For example, let's
 use the generator-based reader to process a single sequence at a time in a
 ``for`` loop:
 
 >>> import skbio.io
->>> fh = StringIO(fs) # reload the StringIO to read from the beginning again
->>> for seq in skbio.io.read(fh, format='fasta'):
+>>> for seq in skbio.io.read(fl, format='fasta'):
 ...     seq
-<BiologicalSequence: AAGCTNGGGC... (length: 42)>
-<BiologicalSequence: AAGCCTTGGC... (length: 42)>
-<BiologicalSequence: ACCGGTTGGC... (length: 42)>
-<BiologicalSequence: AAACCCTTGC... (length: 42)>
-<BiologicalSequence: AAACCCTTGC... (length: 42)>
-
-A single sequence can also be read into a ``BiologicalSequence`` (or subclass):
+...     print('')
+Sequence
+------------------------------------------------
+Metadata:
+    u'description': u'Turkey'
+    u'id': u'seq1'
+Stats:
+    length: 42
+------------------------------------------------
+0 AAGCTNGGGC ATTTCAGGGT GAGCCCGGGC AATACAGGGT AT
+<BLANKLINE>
+Sequence
+------------------------------------------------
+Metadata:
+    u'description': u'Salmo gair'
+    u'id': u'seq2'
+Stats:
+    length: 42
+------------------------------------------------
+0 AAGCCTTGGC AGTGCAGGGT GAGCCGTGGC CGGGCACGGT AT
+<BLANKLINE>
+Sequence
+------------------------------------------------
+Metadata:
+    u'description': u'H. Sapiens'
+    u'id': u'seq3'
+Stats:
+    length: 42
+------------------------------------------------
+0 ACCGGTTGGC CGTTCAGGGT ACAGGTTGGC CGTTCAGGGT AA
+<BLANKLINE>
+Sequence
+------------------------------------------------
+Metadata:
+    u'description': u'Chimp'
+    u'id': u'seq4'
+Stats:
+    length: 42
+------------------------------------------------
+0 AAACCCTTGC CGTTACGCTT AAACCGAGGC CGGGACACTC AT
+<BLANKLINE>
+Sequence
+------------------------------------------------
+Metadata:
+    u'description': u'Gorilla'
+    u'id': u'seq5'
+Stats:
+    length: 42
+------------------------------------------------
+0 AAACCCTTGC CGGTACGCTT AAACCATTGC CGGTACGCTT AA
+<BLANKLINE>
 
->>> from skbio import BiologicalSequence
->>> fh = StringIO(fs) # reload the StringIO to read from the beginning again
->>> BiologicalSequence.read(fh)
-<BiologicalSequence: AAGCTNGGGC... (length: 42)>
+A single sequence can also be read into a ``Sequence`` (or subclass):
+
+>>> from skbio import Sequence
+>>> seq = Sequence.read(fl)
+>>> seq
+Sequence
+------------------------------------------------
+Metadata:
+    u'description': u'Turkey'
+    u'id': u'seq1'
+Stats:
+    length: 42
+------------------------------------------------
+0 AAGCTNGGGC ATTTCAGGGT GAGCCCGGGC AATACAGGGT AT
 
 By default, the first sequence in the FASTA file is read. This can be
 controlled with ``seq_num``. For example, to read the fifth sequence:
 
->>> fh = StringIO(fs) # reload the StringIO to read from the beginning again
->>> BiologicalSequence.read(fh, seq_num=5)
-<BiologicalSequence: AAACCCTTGC... (length: 42)>
-
-We can use the same API to read the fifth sequence into a ``DNASequence``:
-
->>> fh = StringIO(fs) # reload the StringIO to read from the beginning again
->>> dna_seq = DNASequence.read(fh, seq_num=5)
+>>> seq = Sequence.read(fl, seq_num=5)
+>>> seq
+Sequence
+------------------------------------------------
+Metadata:
+    u'description': u'Gorilla'
+    u'id': u'seq5'
+Stats:
+    length: 42
+------------------------------------------------
+0 AAACCCTTGC CGGTACGCTT AAACCATTGC CGGTACGCTT AA
+
+We can use the same API to read the fifth sequence into a ``DNA``:
+
+>>> dna_seq = DNA.read(fl, seq_num=5)
 >>> dna_seq
-<DNASequence: AAACCCTTGC... (length: 42)>
+DNA
+------------------------------------------------
+Metadata:
+    u'description': u'Gorilla'
+    u'id': u'seq5'
+Stats:
+    length: 42
+    has gaps: False
+    has degenerates: False
+    has non-degenerates: True
+    GC-content: 50.00%
+------------------------------------------------
+0 AAACCCTTGC CGGTACGCTT AAACCATTGC CGGTACGCTT AA
 
 Individual sequence objects can also be written in FASTA format:
 
->>> new_fh = StringIO()
->>> dna_seq.write(new_fh)
->>> print(new_fh.getvalue())
+>>> with StringIO() as fh:
+...     print(dna_seq.write(fh).getvalue())
 >seq5 Gorilla
 AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA
 <BLANKLINE>
->>> new_fh.close()
 
 Reading and Writing FASTA/QUAL Files
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -452,40 +543,57 @@ Also suppose we have the following QUAL file::
     >seq2 db-accession-34989
     3 3 10 42 80
 
->>> fasta_fs = (
-...     ">seq1 db-accession-149855\\n"
-...     "CGATGTC\\n"
-...     ">seq2 db-accession-34989\\n"
-...     "CATCG\\n")
->>> fasta_fh = StringIO(fasta_fs)
->>> qual_fs = (
-...     ">seq1 db-accession-149855\\n"
-...     "40 39 39 4\\n"
-...     "50 1 100\\n"
-...     ">seq2 db-accession-34989\\n"
-...     "3 3 10 42 80\\n")
->>> qual_fh = StringIO(qual_fs)
-
-To read in a single ``BiologicalSequence`` at a time, we can use the
+>>> fasta_fl = [
+...     u">seq1 db-accession-149855\\n",
+...     u"CGATGTC\\n",
+...     u">seq2 db-accession-34989\\n",
+...     u"CATCG\\n"]
+>>> qual_fl = [
+...     u">seq1 db-accession-149855\\n",
+...     u"40 39 39 4\\n",
+...     u"50 1 100\\n",
+...     u">seq2 db-accession-34989\\n",
+...     u"3 3 10 42 80\\n"]
+
+To read in a single ``Sequence`` at a time, we can use the
 generator-based reader as we did above, providing both FASTA and QUAL files:
 
->>> for seq in skbio.io.read(fasta_fh, qual=qual_fh, format='fasta'):
+>>> for seq in skbio.io.read(fasta_fl, qual=qual_fl, format='fasta'):
 ...     seq
-...     seq.quality
-<BiologicalSequence: CGATGTC (length: 7)>
-array([ 40,  39,  39,   4,  50,   1, 100])
-<BiologicalSequence: CATCG (length: 5)>
-array([ 3,  3, 10, 42, 80])
+...     print('')
+Sequence
+------------------------------------------
+Metadata:
+    u'description': u'db-accession-149855'
+    u'id': u'seq1'
+Positional metadata:
+    u'quality': <dtype: uint8>
+Stats:
+    length: 7
+------------------------------------------
+0 CGATGTC
+<BLANKLINE>
+Sequence
+-----------------------------------------
+Metadata:
+    u'description': u'db-accession-34989'
+    u'id': u'seq2'
+Positional metadata:
+    u'quality': <dtype: uint8>
+Stats:
+    length: 5
+-----------------------------------------
+0 CATCG
+<BLANKLINE>
 
-Note that the sequence objects have quality scores since we provided a QUAL
-file. The other FASTA readers operate in a similar manner.
+Note that the sequence objects have quality scores stored as positional
+metadata since we provided a QUAL file. The other FASTA readers operate in a
+similar manner.
 
 Now let's load the sequences and their quality scores into a
 ``SequenceCollection``:
 
->>> fasta_fh = StringIO(fasta_fs) # reload to read from the beginning again
->>> qual_fh = StringIO(qual_fs) # reload to read from the beginning again
->>> sc = SequenceCollection.read(fasta_fh, qual=qual_fh)
+>>> sc = SequenceCollection.read(fasta_fl, qual=qual_fl)
 >>> sc
 <SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
 
@@ -494,7 +602,7 @@ FASTA and QUAL files, respectively, we run:
 
 >>> new_fasta_fh = StringIO()
 >>> new_qual_fh = StringIO()
->>> sc.write(new_fasta_fh, qual=new_qual_fh)
+>>> _ = sc.write(new_fasta_fh, qual=new_qual_fh)
 >>> print(new_fasta_fh.getvalue())
 >seq1 db-accession-149855
 CGATGTC
@@ -525,6 +633,7 @@ References
 .. [6] http://evolution.genetics.washington.edu/phylip/doc/sequence.html
 
 """
+
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -533,69 +642,83 @@ References
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
 from future.builtins import range, zip
-from future.standard_library import hooks
+from six.moves import zip_longest
 
 import textwrap
 
 import numpy as np
 
-from skbio.io import (register_reader, register_writer, register_sniffer,
-                      FASTAFormatError, FileSentinel)
-from skbio.io._base import (_chunk_str, _get_nth_sequence,
-                            _parse_fasta_like_header,
-                            _format_fasta_like_records)
+from skbio.io import create_format, FASTAFormatError, QUALFormatError
+from skbio.io.registry import FileSentinel
+from skbio.io.format._base import (_get_nth_sequence,
+                                   _parse_fasta_like_header,
+                                   _format_fasta_like_records, _line_generator,
+                                   _too_many_blanks)
+from skbio.util._misc import chunk_str
 from skbio.alignment import SequenceCollection, Alignment
-from skbio.sequence import (BiologicalSequence, NucleotideSequence,
-                            DNASequence, RNASequence, ProteinSequence)
+from skbio.sequence import Sequence, DNA, RNA, Protein
+
 
-with hooks():
-    from itertools import zip_longest
+fasta = create_format('fasta')
 
 
- at register_sniffer('fasta')
+ at fasta.sniffer()
 def _fasta_sniffer(fh):
     # Strategy:
-    #   Read up to 10 FASTA records. If at least one record is read (i.e. the
-    #   file isn't empty) and no errors are thrown during reading, assume the
-    #   file is in FASTA format. Next, try to parse the file as QUAL, which has
-    #   stricter requirements. If this succeeds, do *not* identify the file as
-    #   FASTA since we don't want to sniff QUAL files as FASTA (technically
-    #   they can be read as FASTA since the sequences aren't validated but it
-    #   probably isn't what the user wanted). Also, if we add QUAL as its own
-    #   file format in the future, we wouldn't want the FASTA and QUAL sniffers
-    #   to both identify a QUAL file.
+    #   Ignore up to 5 blank/whitespace-only lines at the beginning of the
+    #   file. Read up to 10 records. If at least one record is read (i.e.
+    #   the file isn't empty) and no errors are thrown during reading, assume
+    #   the file is in FASTA format. If a record appears to be QUAL, do *not*
+    #   identify the file as FASTA since we don't want to sniff QUAL files as
+    #   FASTA (technically they can be read as FASTA since the sequences may
+    #   not be validated but it probably isn't what the user wanted). Also, if
+    #   we add QUAL as its own file format in the future, we wouldn't want the
+    #   FASTA and QUAL sniffers to both positively identify a QUAL file.
+    if _too_many_blanks(fh, 5):
+        return False, {}
+
     num_records = 10
+    empty = True
     try:
-        not_empty = False
-        for _ in zip(range(num_records), _fasta_to_generator(fh)):
-            not_empty = True
-
-        if not_empty:
-            fh.seek(0)
-            try:
-                list(zip(range(num_records),
-                         _parse_fasta_raw(fh, _parse_quality_scores, 'QUAL')))
-            except FASTAFormatError:
-                return True, {}
-            else:
-                return False, {}
-        else:
-            return False, {}
+        parser = _parse_fasta_raw(fh, _sniffer_data_parser, FASTAFormatError)
+        for _ in zip(range(num_records), parser):
+            empty = False
     except FASTAFormatError:
         return False, {}
 
+    if empty:
+        return False, {}
+    else:
+        return True, {}
+
 
- at register_reader('fasta')
-def _fasta_to_generator(fh, qual=FileSentinel, constructor=BiologicalSequence):
+def _sniffer_data_parser(chunks):
+    data = _parse_sequence_data(chunks)
+    try:
+        _parse_quality_scores(chunks)
+    except QUALFormatError:
+        return data
+    else:
+        # used for flow control within sniffer, user should never see this
+        # message
+        raise FASTAFormatError('Data appear to be quality scores.')
+
+
+ at fasta.reader(None)
+def _fasta_to_generator(fh, qual=FileSentinel, constructor=Sequence, **kwargs):
     if qual is None:
         for seq, id_, desc in _parse_fasta_raw(fh, _parse_sequence_data,
-                                               'FASTA'):
-            yield constructor(seq, id=id_, description=desc)
+                                               FASTAFormatError):
+            yield constructor(seq, metadata={'id': id_, 'description': desc},
+                              **kwargs)
     else:
-        fasta_gen = _parse_fasta_raw(fh, _parse_sequence_data, 'FASTA')
-        qual_gen = _parse_fasta_raw(qual, _parse_quality_scores, 'QUAL')
+        fasta_gen = _parse_fasta_raw(fh, _parse_sequence_data,
+                                     FASTAFormatError)
+        qual_gen = _parse_fasta_raw(qual, _parse_quality_scores,
+                                    QUALFormatError)
 
         for fasta_rec, qual_rec in zip_longest(fasta_gen, qual_gen,
                                                fillvalue=None):
@@ -612,69 +735,70 @@ def _fasta_to_generator(fh, qual=FileSentinel, constructor=BiologicalSequence):
             if fasta_id != qual_id:
                 raise FASTAFormatError(
                     "IDs do not match between FASTA and QUAL records: %r != %r"
-                    % (fasta_id, qual_id))
+                    % (str(fasta_id), str(qual_id)))
             if fasta_desc != qual_desc:
                 raise FASTAFormatError(
                     "Descriptions do not match between FASTA and QUAL "
-                    "records: %r != %r" % (fasta_desc, qual_desc))
+                    "records: %r != %r" % (str(fasta_desc), str(qual_desc)))
 
             # sequence and quality scores lengths are checked in constructor
-            yield constructor(fasta_seq, id=fasta_id, description=fasta_desc,
-                              quality=qual_scores)
+            yield constructor(
+                fasta_seq,
+                metadata={'id': fasta_id, 'description': fasta_desc},
+                positional_metadata={'quality': qual_scores}, **kwargs)
 
 
- at register_reader('fasta', BiologicalSequence)
+ at fasta.reader(Sequence)
 def _fasta_to_biological_sequence(fh, qual=FileSentinel, seq_num=1):
     return _get_nth_sequence(
-        _fasta_to_generator(fh, qual=qual, constructor=BiologicalSequence),
+        _fasta_to_generator(fh, qual=qual, constructor=Sequence),
         seq_num)
 
 
- at register_reader('fasta', NucleotideSequence)
-def _fasta_to_nucleotide_sequence(fh, qual=FileSentinel, seq_num=1):
+ at fasta.reader(DNA)
+def _fasta_to_dna_sequence(fh, qual=FileSentinel, seq_num=1, **kwargs):
     return _get_nth_sequence(
-        _fasta_to_generator(fh, qual=qual, constructor=NucleotideSequence),
+        _fasta_to_generator(fh, qual=qual,
+                            constructor=DNA, **kwargs),
         seq_num)
 
 
- at register_reader('fasta', DNASequence)
-def _fasta_to_dna_sequence(fh, qual=FileSentinel, seq_num=1):
+ at fasta.reader(RNA)
+def _fasta_to_rna_sequence(fh, qual=FileSentinel, seq_num=1, **kwargs):
     return _get_nth_sequence(
-        _fasta_to_generator(fh, qual=qual, constructor=DNASequence),
+        _fasta_to_generator(fh, qual=qual,
+                            constructor=RNA, **kwargs),
         seq_num)
 
 
- at register_reader('fasta', RNASequence)
-def _fasta_to_rna_sequence(fh, qual=FileSentinel, seq_num=1):
+ at fasta.reader(Protein)
+def _fasta_to_protein_sequence(fh, qual=FileSentinel, seq_num=1, **kwargs):
     return _get_nth_sequence(
-        _fasta_to_generator(fh, qual=qual, constructor=RNASequence),
+        _fasta_to_generator(fh, qual=qual,
+                            constructor=Protein, **kwargs),
         seq_num)
 
 
- at register_reader('fasta', ProteinSequence)
-def _fasta_to_protein_sequence(fh, qual=FileSentinel, seq_num=1):
-    return _get_nth_sequence(
-        _fasta_to_generator(fh, qual=qual, constructor=ProteinSequence),
-        seq_num)
-
-
- at register_reader('fasta', SequenceCollection)
+ at fasta.reader(SequenceCollection)
 def _fasta_to_sequence_collection(fh, qual=FileSentinel,
-                                  constructor=BiologicalSequence):
+                                  constructor=Sequence, **kwargs):
     return SequenceCollection(
-        list(_fasta_to_generator(fh, qual=qual, constructor=constructor)))
+        list(_fasta_to_generator(fh, qual=qual, constructor=constructor,
+                                 **kwargs)))
 
 
- at register_reader('fasta', Alignment)
-def _fasta_to_alignment(fh, qual=FileSentinel, constructor=BiologicalSequence):
+ at fasta.reader(Alignment)
+def _fasta_to_alignment(fh, qual=FileSentinel, constructor=Sequence, **kwargs):
     return Alignment(
-        list(_fasta_to_generator(fh, qual=qual, constructor=constructor)))
+        list(_fasta_to_generator(fh, qual=qual, constructor=constructor,
+                                 **kwargs)))
 
 
- at register_writer('fasta')
+ at fasta.writer(None)
 def _generator_to_fasta(obj, fh, qual=FileSentinel,
                         id_whitespace_replacement='_',
-                        description_newline_replacement=' ', max_width=None):
+                        description_newline_replacement=' ', max_width=None,
+                        lowercase=None):
     if max_width is not None:
         if max_width < 1:
             raise ValueError(
@@ -691,10 +815,10 @@ def _generator_to_fasta(obj, fh, qual=FileSentinel,
 
     formatted_records = _format_fasta_like_records(
         obj, id_whitespace_replacement, description_newline_replacement,
-        qual is not None)
+        qual is not None, lowercase)
     for header, seq_str, qual_scores in formatted_records:
         if max_width is not None:
-            seq_str = _chunk_str(seq_str, max_width, '\n')
+            seq_str = chunk_str(seq_str, max_width, '\n')
 
         fh.write('>%s\n%s\n' % (header, seq_str))
 
@@ -705,7 +829,7 @@ def _generator_to_fasta(obj, fh, qual=FileSentinel,
             qual.write('>%s\n%s\n' % (header, qual_str))
 
 
- at register_writer('fasta', BiologicalSequence)
+ at fasta.writer(Sequence)
 def _biological_sequence_to_fasta(obj, fh, qual=FileSentinel,
                                   id_whitespace_replacement='_',
                                   description_newline_replacement=' ',
@@ -714,114 +838,122 @@ def _biological_sequence_to_fasta(obj, fh, qual=FileSentinel,
                         description_newline_replacement, max_width)
 
 
-@register_writer('fasta', NucleotideSequence)
-def _nucleotide_sequence_to_fasta(obj, fh, qual=FileSentinel,
-                                  id_whitespace_replacement='_',
-                                  description_newline_replacement=' ',
-                                  max_width=None):
-    _sequences_to_fasta([obj], fh, qual, id_whitespace_replacement,
-                        description_newline_replacement, max_width)
-
-
-@register_writer('fasta', DNASequence)
+@fasta.writer(DNA)
 def _dna_sequence_to_fasta(obj, fh, qual=FileSentinel,
                            id_whitespace_replacement='_',
                            description_newline_replacement=' ',
-                           max_width=None):
+                           max_width=None, lowercase=None):
     _sequences_to_fasta([obj], fh, qual, id_whitespace_replacement,
-                        description_newline_replacement, max_width)
+                        description_newline_replacement, max_width, lowercase)
 
 
-@register_writer('fasta', RNASequence)
+@fasta.writer(RNA)
 def _rna_sequence_to_fasta(obj, fh, qual=FileSentinel,
                            id_whitespace_replacement='_',
                            description_newline_replacement=' ',
-                           max_width=None):
+                           max_width=None, lowercase=None):
     _sequences_to_fasta([obj], fh, qual, id_whitespace_replacement,
-                        description_newline_replacement, max_width)
+                        description_newline_replacement, max_width, lowercase)
 
 
-@register_writer('fasta', ProteinSequence)
+@fasta.writer(Protein)
 def _protein_sequence_to_fasta(obj, fh, qual=FileSentinel,
                                id_whitespace_replacement='_',
                                description_newline_replacement=' ',
-                               max_width=None):
+                               max_width=None, lowercase=None):
     _sequences_to_fasta([obj], fh, qual, id_whitespace_replacement,
-                        description_newline_replacement, max_width)
+                        description_newline_replacement, max_width, lowercase)
 
 
-@register_writer('fasta', SequenceCollection)
+@fasta.writer(SequenceCollection)
 def _sequence_collection_to_fasta(obj, fh, qual=FileSentinel,
                                   id_whitespace_replacement='_',
                                   description_newline_replacement=' ',
-                                  max_width=None):
+                                  max_width=None, lowercase=None):
     _sequences_to_fasta(obj, fh, qual, id_whitespace_replacement,
-                        description_newline_replacement, max_width)
+                        description_newline_replacement, max_width, lowercase)
 
 
-@register_writer('fasta', Alignment)
+@fasta.writer(Alignment)
 def _alignment_to_fasta(obj, fh, qual=FileSentinel,
                         id_whitespace_replacement='_',
-                        description_newline_replacement=' ', max_width=None):
+                        description_newline_replacement=' ', max_width=None,
+                        lowercase=None):
     _sequences_to_fasta(obj, fh, qual, id_whitespace_replacement,
-                        description_newline_replacement, max_width)
+                        description_newline_replacement, max_width, lowercase)
 
 
-def _parse_fasta_raw(fh, data_parser, format_label):
+def _parse_fasta_raw(fh, data_parser, error_type):
     """Raw parser for FASTA or QUAL files.
 
     Returns raw values (seq/qual, id, description). It is the responsibility of
     the caller to construct the correct in-memory object to hold the data.
 
     """
-    line = next(fh)
+    # Skip any blank or whitespace-only lines at beginning of file
+    seq_header = next(_line_generator(fh, skip_blanks=True))
+
     # header check inlined here and below for performance
-    if line.startswith('>'):
-        id_, desc = _parse_fasta_like_header(line)
+    if seq_header.startswith('>'):
+        id_, desc = _parse_fasta_like_header(seq_header)
     else:
-        raise FASTAFormatError(
-            "Found line without a header in %s-formatted file:\n%s" %
-            (format_label, line))
+        raise error_type(
+            "Found non-header line when attempting to read the 1st record:"
+            "\n%s" % seq_header)
 
     data_chunks = []
-    for line in fh:
+    prev = seq_header
+    for line in _line_generator(fh, skip_blanks=False):
         if line.startswith('>'):
             # new header, so yield current record and reset state
             yield data_parser(data_chunks), id_, desc
             data_chunks = []
             id_, desc = _parse_fasta_like_header(line)
         else:
-            line = line.strip()
             if line:
+                # ensure no blank lines within a single record
+                if not prev:
+                    raise error_type(
+                        "Found blank or whitespace-only line within record.")
                 data_chunks.append(line)
-            else:
-                raise FASTAFormatError(
-                    "Found blank or whitespace-only line in %s-formatted "
-                    "file." % format_label)
+        prev = line
     # yield last record in file
     yield data_parser(data_chunks), id_, desc
 
 
 def _parse_sequence_data(chunks):
     if not chunks:
-        raise FASTAFormatError("Found FASTA header without sequence data.")
+        raise FASTAFormatError("Found header without sequence data.")
     return ''.join(chunks)
 
 
 def _parse_quality_scores(chunks):
     if not chunks:
-        raise FASTAFormatError("Found QUAL header without quality scores.")
+        raise QUALFormatError("Found header without quality scores.")
 
     qual_str = ' '.join(chunks)
     try:
-        return np.asarray(qual_str.split(), dtype=int)
+        quality = np.asarray(qual_str.split(), dtype=int)
     except ValueError:
-        raise FASTAFormatError(
-            "Could not convert quality scores to integers:\n%s" % qual_str)
+        raise QUALFormatError(
+            "Could not convert quality scores to integers:\n%s"
+            % str(qual_str))
+
+    if (quality < 0).any():
+        raise QUALFormatError(
+            "Encountered negative quality score(s). Quality scores must be "
+            "greater than or equal to zero.")
+    if (quality > 255).any():
+        raise QUALFormatError(
+            "Encountered quality score(s) greater than 255. scikit-bio only "
+            "supports quality scores in the range 0-255 (inclusive) when "
+            "reading QUAL files.")
+    return quality.astype(np.uint8, casting='unsafe', copy=False)
 
 
 def _sequences_to_fasta(obj, fh, qual, id_whitespace_replacement,
-                        description_newline_replacement, max_width):
+                        description_newline_replacement, max_width,
+                        lowercase=None):
     def seq_gen():
         for seq in obj:
             yield seq
@@ -830,4 +962,4 @@ def _sequences_to_fasta(obj, fh, qual, id_whitespace_replacement,
         seq_gen(), fh, qual=qual,
         id_whitespace_replacement=id_whitespace_replacement,
         description_newline_replacement=description_newline_replacement,
-        max_width=max_width)
+        max_width=max_width, lowercase=lowercase)
diff --git a/skbio/io/fastq.py b/skbio/io/format/fastq.py
similarity index 68%
rename from skbio/io/fastq.py
rename to skbio/io/format/fastq.py
index 7a25b4d..af36416 100644
--- a/skbio/io/fastq.py
+++ b/skbio/io/format/fastq.py
@@ -1,8 +1,8 @@
 r"""
-FASTQ format (:mod:`skbio.io.fastq`)
-====================================
+FASTQ format (:mod:`skbio.io.format.fastq`)
+===========================================
 
-.. currentmodule:: skbio.io.fastq
+.. currentmodule:: skbio.io.format.fastq
 
 The FASTQ file format (``fastq``) stores biological (e.g., nucleotide)
 sequences and their quality scores in a simple plain text format that is both
@@ -36,21 +36,19 @@ Format Support
 +------+------+---------------------------------------------------------------+
 |Reader|Writer|                          Object Class                         |
 +======+======+===============================================================+
-|Yes   |Yes   |generator of :mod:`skbio.sequence.BiologicalSequence` objects  |
+|Yes   |Yes   |generator of :mod:`skbio.sequence.Sequence` objects            |
 +------+------+---------------------------------------------------------------+
 |Yes   |Yes   |:mod:`skbio.alignment.SequenceCollection`                      |
 +------+------+---------------------------------------------------------------+
 |Yes   |Yes   |:mod:`skbio.alignment.Alignment`                               |
 +------+------+---------------------------------------------------------------+
-|Yes   |Yes   |:mod:`skbio.sequence.BiologicalSequence`                       |
+|Yes   |Yes   |:mod:`skbio.sequence.Sequence`                                 |
 +------+------+---------------------------------------------------------------+
-|Yes   |Yes   |:mod:`skbio.sequence.NucleotideSequence`                       |
+|Yes   |Yes   |:mod:`skbio.sequence.DNA`                                      |
 +------+------+---------------------------------------------------------------+
-|Yes   |Yes   |:mod:`skbio.sequence.DNASequence`                              |
+|Yes   |Yes   |:mod:`skbio.sequence.RNA`                                      |
 +------+------+---------------------------------------------------------------+
-|Yes   |Yes   |:mod:`skbio.sequence.RNASequence`                              |
-+------+------+---------------------------------------------------------------+
-|Yes   |Yes   |:mod:`skbio.sequence.ProteinSequence`                          |
+|Yes   |Yes   |:mod:`skbio.sequence.Protein`                                  |
 +------+------+---------------------------------------------------------------+
 
 Format Specification
@@ -71,21 +69,32 @@ sections:
 
 For the complete FASTQ format specification, see [1]_. scikit-bio's FASTQ
 implementation follows the format specification described in this excellent
-publication, including validating the implementation against the FASTQ examples
-provided in the publication's supplementary data.
+publication, including validating the implementation against the FASTQ example
+files provided in the publication's supplementary data.
 
 .. note:: IDs and descriptions will be parsed from sequence header lines in
-   exactly the same way as FASTA headers (:mod:`skbio.io.fasta`).
+   exactly the same way as FASTA headers (:mod:`skbio.io.format.fasta`). IDs,
+   descriptions, and quality scores are also stored automatically on the
+   object in the same way as with FASTA.
+
+.. note:: Blank or whitespace-only lines are only allowed at the beginning of
+   the file, between FASTQ records, or at the end of the file. A blank or
+   whitespace-only line after the header line, within the sequence, or within
+   quality scores will raise an error.
+
+   scikit-bio will ignore leading and trailing whitespace characters on each
+   line while reading.
 
-   Whitespace is not allowed in sequence data or quality scores. Leading and
-   trailing whitespace is not stripped from sequence data or quality scores,
-   resulting in an error being raised if found.
+.. note:: Validation may be performed depending on the type of object the data
+   is being read into. This behavior matches that of FASTA files.
 
-   scikit-bio will write FASTQ files in a normalized format, with each record
-   section on a single line. Thus, each record will be composed of *exactly*
-   four lines. The quality header line won't have the sequence ID and
+.. note:: scikit-bio will write FASTQ files in a normalized format, with each
+   record section on a single line. Thus, each record will be composed of
+   *exactly* four lines. The quality header line won't have the sequence ID and
    description repeated.
 
+.. note:: `lowercase` functionality is supported the same as with FASTA.
+
 Quality Score Variants
 ^^^^^^^^^^^^^^^^^^^^^^
 FASTQ associates quality scores with sequence data, with each quality score
@@ -144,7 +153,7 @@ The following parameters are available to all FASTQ format readers and writers:
    provided at the same time.
 
 The following additional parameters are the same as in FASTA format
-(:mod:`skbio.io.fasta`):
+(:mod:`skbio.io.format.fasta`):
 
 - ``constructor``: see ``constructor`` parameter in FASTA format
 
@@ -156,6 +165,8 @@ The following additional parameters are the same as in FASTA format
 - ``description_newline_replacement``: see ``description_newline_replacement``
   parameter in FASTA format
 
+- ``lowercase``: see ``lowercase`` parameter in FASTA format
+
 Examples
 --------
 Suppose we have the following FASTQ file with two DNA sequences::
@@ -180,7 +191,7 @@ Let's define this file in-memory as a ``StringIO``, though this could be a real
 file path, file handle, or anything that's supported by scikit-bio's I/O
 registry in practice:
 
->>> from StringIO import StringIO
+>>> from io import StringIO
 >>> fs = '\n'.join([
 ...     r"@seq1 description 1",
 ...     r"AACACCAAACTTCTCCACC",
@@ -202,19 +213,33 @@ To load the sequences into a ``SequenceCollection``, we run:
 <SequenceCollection: n=2; mean +/- std length=36.50 +/- 1.50>
 
 Note that quality scores are decoded from Sanger. To load the second sequence
-as a ``DNASequence``:
+as a ``DNA``:
 
->>> from skbio import DNASequence
+>>> from skbio import DNA
 >>> fh = StringIO(fs) # reload the StringIO to read from the beginning again
->>> DNASequence.read(fh, variant='sanger', seq_num=2)
-<DNASequence: TATGTATATA... (length: 35)>
+>>> seq = DNA.read(fh, variant='sanger', seq_num=2)
+>>> seq
+DNA
+----------------------------------------
+Metadata:
+    u'description': u'description 2'
+    u'id': u'seq2'
+Positional metadata:
+    u'quality': <dtype: uint8>
+Stats:
+    length: 35
+    has gaps: False
+    has degenerates: False
+    has non-degenerates: True
+    GC-content: 14.29%
+----------------------------------------
+0 TATGTATATA TAACATATAC ATATATACAT ACATA
 
 To write our ``SequenceCollection`` to a FASTQ file with quality scores encoded
 using the ``illumina1.3`` variant:
 
 >>> new_fh = StringIO()
->>> sc.write(new_fh, format='fastq', variant='illumina1.3')
->>> print(new_fh.getvalue())
+>>> print(sc.write(new_fh, format='fastq', variant='illumina1.3').getvalue())
 @seq1 description 1
 AACACCAAACTTCTCCACCACGTGAGCTACAAAAGGGT
 +
@@ -243,6 +268,7 @@ References
 .. [3] http://www.open-bio.org/
 
 """
+
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -251,28 +277,38 @@ References
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
 from future.builtins import range, zip
 
 import re
-from skbio.io import (register_reader, register_writer, register_sniffer,
-                      FASTQFormatError)
-from skbio.io._base import (_decode_qual_to_phred, _encode_phred_to_qual,
-                            _get_nth_sequence, _parse_fasta_like_header,
-                            _format_fasta_like_records)
+
+import numpy as np
+
+from skbio.io import create_format, FASTQFormatError
+from skbio.io.format._base import (
+    _decode_qual_to_phred, _encode_phred_to_qual, _get_nth_sequence,
+    _parse_fasta_like_header, _format_fasta_like_records, _line_generator,
+    _too_many_blanks)
 from skbio.alignment import SequenceCollection, Alignment
-from skbio.sequence import (BiologicalSequence, NucleotideSequence,
-                            DNASequence, RNASequence, ProteinSequence)
+from skbio.sequence import Sequence, DNA, RNA, Protein
 
 _whitespace_regex = re.compile(r'\s')
 
 
-@register_sniffer('fastq')
+fastq = create_format('fastq')
+
+
+@fastq.sniffer()
 def _fastq_sniffer(fh):
     # Strategy:
-    #   Read up to 10 records. If at least one record is read (i.e. the file
-    #   isn't empty) and the quality scores are in printable ASCII range,
+    #   Ignore up to 5 blank/whitespace-only lines at the beginning of the
+    #   file. Read up to 10 records. If at least one record is read (i.e. the
+    #   file isn't empty) and the quality scores are in printable ASCII range,
     #   assume the file is FASTQ.
+    if _too_many_blanks(fh, 5):
+        return False, {}
+
     try:
         not_empty = False
         for _ in zip(range(10), _fastq_to_generator(fh, phred_offset=33)):
@@ -282,95 +318,97 @@ def _fastq_sniffer(fh):
         return False, {}
 
 
-@register_reader('fastq')
+@fastq.reader(None)
 def _fastq_to_generator(fh, variant=None, phred_offset=None,
-                        constructor=BiologicalSequence):
-    seq_header = next(_line_generator(fh))
+                        constructor=Sequence, **kwargs):
+    # Skip any blank or whitespace-only lines at beginning of file
+    seq_header = next(_line_generator(fh, skip_blanks=True))
+
     if not seq_header.startswith('@'):
         raise FASTQFormatError(
             "Expected sequence (@) header line at start of file: %r"
-            % seq_header)
+            % str(seq_header))
 
     while seq_header is not None:
         id_, desc = _parse_fasta_like_header(seq_header)
-        seq, qual_header = _parse_sequence_data(fh)
+        seq, qual_header = _parse_sequence_data(fh, seq_header)
 
         if qual_header != '+' and qual_header[1:] != seq_header[1:]:
             raise FASTQFormatError(
                 "Sequence (@) and quality (+) header lines do not match: "
-                "%r != %r" % (seq_header[1:], qual_header[1:]))
+                "%r != %r" % (str(seq_header[1:]), str(qual_header[1:])))
 
-        phred_scores, seq_header = _parse_quality_scores(fh, len(seq), variant,
-                                                         phred_offset)
-        yield constructor(seq, id=id_, description=desc, quality=phred_scores)
+        phred_scores, seq_header = _parse_quality_scores(fh, len(seq),
+                                                         variant,
+                                                         phred_offset,
+                                                         qual_header)
+        yield constructor(seq, metadata={'id': id_, 'description': desc},
+                          positional_metadata={'quality': phred_scores},
+                          **kwargs)
 
 
-@register_reader('fastq', BiologicalSequence)
+@fastq.reader(Sequence)
 def _fastq_to_biological_sequence(fh, variant=None, phred_offset=None,
                                   seq_num=1):
     return _get_nth_sequence(
         _fastq_to_generator(fh, variant=variant, phred_offset=phred_offset,
-                            constructor=BiologicalSequence),
-        seq_num)
-
-
-@register_reader('fastq', NucleotideSequence)
-def _fastq_to_nucleotide_sequence(fh, variant=None, phred_offset=None,
-                                  seq_num=1):
-    return _get_nth_sequence(
-        _fastq_to_generator(fh, variant=variant, phred_offset=phred_offset,
-                            constructor=NucleotideSequence),
+                            constructor=Sequence),
         seq_num)
 
 
-@register_reader('fastq', DNASequence)
-def _fastq_to_dna_sequence(fh, variant=None, phred_offset=None, seq_num=1):
+@fastq.reader(DNA)
+def _fastq_to_dna_sequence(fh, variant=None, phred_offset=None, seq_num=1,
+                           **kwargs):
     return _get_nth_sequence(
         _fastq_to_generator(fh, variant=variant, phred_offset=phred_offset,
-                            constructor=DNASequence),
+                            constructor=DNA, **kwargs),
         seq_num)
 
 
-@register_reader('fastq', RNASequence)
-def _fastq_to_rna_sequence(fh, variant=None, phred_offset=None, seq_num=1):
+@fastq.reader(RNA)
+def _fastq_to_rna_sequence(fh, variant=None, phred_offset=None, seq_num=1,
+                           **kwargs):
     return _get_nth_sequence(
         _fastq_to_generator(fh, variant=variant, phred_offset=phred_offset,
-                            constructor=RNASequence),
+                            constructor=RNA, **kwargs),
         seq_num)
 
 
-@register_reader('fastq', ProteinSequence)
-def _fastq_to_protein_sequence(fh, variant=None, phred_offset=None, seq_num=1):
+@fastq.reader(Protein)
+def _fastq_to_protein_sequence(fh, variant=None, phred_offset=None, seq_num=1,
+                               **kwargs):
     return _get_nth_sequence(
         _fastq_to_generator(fh, variant=variant, phred_offset=phred_offset,
-                            constructor=ProteinSequence),
+                            constructor=Protein,
+                            **kwargs),
         seq_num)
 
 
-@register_reader('fastq', SequenceCollection)
+@fastq.reader(SequenceCollection)
 def _fastq_to_sequence_collection(fh, variant=None, phred_offset=None,
-                                  constructor=BiologicalSequence):
+                                  constructor=Sequence, **kwargs):
     return SequenceCollection(
         list(_fastq_to_generator(fh, variant=variant,
                                  phred_offset=phred_offset,
-                                 constructor=constructor)))
+                                 constructor=constructor, **kwargs)))
 
 
-@register_reader('fastq', Alignment)
+@fastq.reader(Alignment)
 def _fastq_to_alignment(fh, variant=None, phred_offset=None,
-                        constructor=BiologicalSequence):
+                        constructor=Sequence, **kwargs):
     return Alignment(
         list(_fastq_to_generator(fh, variant=variant,
                                  phred_offset=phred_offset,
-                                 constructor=constructor)))
+                                 constructor=constructor, **kwargs)))
 
 
-@register_writer('fastq')
+@fastq.writer(None)
 def _generator_to_fastq(obj, fh, variant=None, phred_offset=None,
                         id_whitespace_replacement='_',
-                        description_newline_replacement=' '):
+                        description_newline_replacement=' ', lowercase=None):
     formatted_records = _format_fasta_like_records(
-        obj, id_whitespace_replacement, description_newline_replacement, True)
+        obj, id_whitespace_replacement, description_newline_replacement, True,
+        lowercase=lowercase)
     for header, seq_str, qual_scores in formatted_records:
         qual_str = _encode_phred_to_qual(qual_scores, variant=variant,
                                          phred_offset=phred_offset)
@@ -383,7 +421,7 @@ def _generator_to_fastq(obj, fh, variant=None, phred_offset=None,
         fh.write('\n')
 
 
-@register_writer('fastq', BiologicalSequence)
+@fastq.writer(Sequence)
 def _biological_sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
                                   id_whitespace_replacement='_',
                                   description_newline_replacement=' '):
@@ -392,72 +430,68 @@ def _biological_sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
                         description_newline_replacement)
 
 
-@register_writer('fastq', NucleotideSequence)
-def _nucleotide_sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
-                                  id_whitespace_replacement='_',
-                                  description_newline_replacement=' '):
-    _sequences_to_fastq([obj], fh, variant, phred_offset,
-                        id_whitespace_replacement,
-                        description_newline_replacement)
-
-
-@register_writer('fastq', DNASequence)
+@fastq.writer(DNA)
 def _dna_sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
                            id_whitespace_replacement='_',
-                           description_newline_replacement=' '):
+                           description_newline_replacement=' ',
+                           lowercase=None):
     _sequences_to_fastq([obj], fh, variant, phred_offset,
                         id_whitespace_replacement,
-                        description_newline_replacement)
+                        description_newline_replacement, lowercase=lowercase)
 
 
-@register_writer('fastq', RNASequence)
+@fastq.writer(RNA)
 def _rna_sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
                            id_whitespace_replacement='_',
-                           description_newline_replacement=' '):
+                           description_newline_replacement=' ',
+                           lowercase=None):
     _sequences_to_fastq([obj], fh, variant, phred_offset,
                         id_whitespace_replacement,
-                        description_newline_replacement)
+                        description_newline_replacement, lowercase=lowercase)
 
 
-@register_writer('fastq', ProteinSequence)
+@fastq.writer(Protein)
 def _protein_sequence_to_fastq(obj, fh, variant=None, phred_offset=None,
                                id_whitespace_replacement='_',
-                               description_newline_replacement=' '):
+                               description_newline_replacement=' ',
+                               lowercase=None):
     _sequences_to_fastq([obj], fh, variant, phred_offset,
                         id_whitespace_replacement,
-                        description_newline_replacement)
+                        description_newline_replacement, lowercase=lowercase)
 
 
-@register_writer('fastq', SequenceCollection)
+@fastq.writer(SequenceCollection)
 def _sequence_collection_to_fastq(obj, fh, variant=None, phred_offset=None,
                                   id_whitespace_replacement='_',
-                                  description_newline_replacement=' '):
+                                  description_newline_replacement=' ',
+                                  lowercase=None):
     _sequences_to_fastq(obj, fh, variant, phred_offset,
                         id_whitespace_replacement,
-                        description_newline_replacement)
+                        description_newline_replacement, lowercase=lowercase)
 
 
-@register_writer('fastq', Alignment)
+@fastq.writer(Alignment)
 def _alignment_to_fastq(obj, fh, variant=None, phred_offset=None,
                         id_whitespace_replacement='_',
-                        description_newline_replacement=' '):
+                        description_newline_replacement=' ',
+                        lowercase=None):
     _sequences_to_fastq(obj, fh, variant, phred_offset,
                         id_whitespace_replacement,
-                        description_newline_replacement)
+                        description_newline_replacement, lowercase=lowercase)
 
 
-def _line_generator(fh):
-    for line in fh:
-        line = line.rstrip('\n')
-        if not line:
-            raise FASTQFormatError("Found blank line in FASTQ-formatted file.")
-        yield line
+def _blank_error(unique_text):
+    error_string = ("Found blank or whitespace-only line {} in "
+                    "FASTQ file").format(unique_text)
+    raise FASTQFormatError(error_string)
 
 
-def _parse_sequence_data(fh):
+def _parse_sequence_data(fh, prev):
     seq_chunks = []
-    for chunk in _line_generator(fh):
+    for chunk in _line_generator(fh, skip_blanks=False):
         if chunk.startswith('+'):
+            if not prev:
+                _blank_error("before '+'")
             if not seq_chunks:
                 raise FASTQFormatError(
                     "Found FASTQ record without sequence data.")
@@ -467,43 +501,50 @@ def _parse_sequence_data(fh):
                 "Found FASTQ record that is missing a quality (+) header line "
                 "after sequence data.")
         else:
+            if not prev:
+                _blank_error("after header or within sequence")
             if _whitespace_regex.search(chunk):
                 raise FASTQFormatError(
-                    "Found whitespace in sequence data: %r" % chunk)
+                    "Found whitespace in sequence data: %r" % str(chunk))
             seq_chunks.append(chunk)
+        prev = chunk
 
     raise FASTQFormatError(
         "Found incomplete/truncated FASTQ record at end of file.")
 
 
-def _parse_quality_scores(fh, seq_len, variant, phred_offset):
+def _parse_quality_scores(fh, seq_len, variant, phred_offset, prev):
     phred_scores = []
     qual_len = 0
-    for chunk in _line_generator(fh):
-        if chunk.startswith('@') and qual_len == seq_len:
-            return phred_scores, chunk
-        else:
-            qual_len += len(chunk)
-
-            if qual_len > seq_len:
-                raise FASTQFormatError(
-                    "Found more quality score characters than sequence "
-                    "characters. Extra quality score characters: %r" %
-                    chunk[-(qual_len - seq_len):])
-
-            phred_scores.extend(
-                _decode_qual_to_phred(chunk, variant=variant,
-                                      phred_offset=phred_offset))
+    for chunk in _line_generator(fh, skip_blanks=False):
+        if chunk:
+            if chunk.startswith('@') and qual_len == seq_len:
+                return np.hstack(phred_scores), chunk
+            else:
+                if not prev:
+                    _blank_error("after '+' or within quality scores")
+                qual_len += len(chunk)
+
+                if qual_len > seq_len:
+                    raise FASTQFormatError(
+                        "Found more quality score characters than sequence "
+                        "characters. Extra quality score characters: %r" %
+                        chunk[-(qual_len - seq_len):])
+
+                phred_scores.append(
+                    _decode_qual_to_phred(chunk, variant=variant,
+                                          phred_offset=phred_offset))
+        prev = chunk
 
     if qual_len != seq_len:
         raise FASTQFormatError(
             "Found incomplete/truncated FASTQ record at end of file.")
-    return phred_scores, None
+    return np.hstack(phred_scores), None
 
 
 def _sequences_to_fastq(obj, fh, variant, phred_offset,
                         id_whitespace_replacement,
-                        description_newline_replacement):
+                        description_newline_replacement, lowercase=None):
     def seq_gen():
         for seq in obj:
             yield seq
@@ -511,4 +552,5 @@ def _sequences_to_fastq(obj, fh, variant, phred_offset,
     _generator_to_fastq(
         seq_gen(), fh, variant=variant, phred_offset=phred_offset,
         id_whitespace_replacement=id_whitespace_replacement,
-        description_newline_replacement=description_newline_replacement)
+        description_newline_replacement=description_newline_replacement,
+        lowercase=lowercase)
diff --git a/skbio/io/lsmat.py b/skbio/io/format/lsmat.py
similarity index 89%
rename from skbio/io/lsmat.py
rename to skbio/io/format/lsmat.py
index 0f54d4e..fa48192 100644
--- a/skbio/io/lsmat.py
+++ b/skbio/io/format/lsmat.py
@@ -1,8 +1,8 @@
 """
-Labeled square matrix format (:mod:`skbio.io.lsmat`)
-====================================================
+Labeled square matrix format (:mod:`skbio.io.format.lsmat`)
+===========================================================
 
-.. currentmodule:: skbio.io.lsmat
+.. currentmodule:: skbio.io.format.lsmat
 
 The labeled square matrix file format (``lsmat``) stores numeric square
 matrix data relating a set of objects along each axis. The format also stores
@@ -61,6 +61,7 @@ format. ``delimiter`` can be specified as a keyword argument when reading from
 or writing to a file.
 
 """
+
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -69,18 +70,21 @@ or writing to a file.
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
 
 import csv
 
 import numpy as np
 
 from skbio.stats.distance import DissimilarityMatrix, DistanceMatrix
-from skbio.io import (register_reader, register_writer, register_sniffer,
-                      LSMatFormatError)
+from skbio.io import create_format, LSMatFormatError
+
+
+lsmat = create_format('lsmat')
 
 
- at register_sniffer('lsmat')
+ at lsmat.sniffer()
 def _lsmat_sniffer(fh):
     header = _find_header(fh)
 
@@ -100,22 +104,22 @@ def _lsmat_sniffer(fh):
     return False, {}
 
 
- at register_reader('lsmat', DissimilarityMatrix)
+ at lsmat.reader(DissimilarityMatrix)
 def _lsmat_to_dissimilarity_matrix(fh, delimiter='\t'):
     return _lsmat_to_matrix(DissimilarityMatrix, fh, delimiter)
 
 
- at register_reader('lsmat', DistanceMatrix)
+ at lsmat.reader(DistanceMatrix)
 def _lsmat_to_distance_matrix(fh, delimiter='\t'):
     return _lsmat_to_matrix(DistanceMatrix, fh, delimiter)
 
 
- at register_writer('lsmat', DissimilarityMatrix)
+ at lsmat.writer(DissimilarityMatrix)
 def _dissimilarity_matrix_to_lsmat(obj, fh, delimiter='\t'):
     _matrix_to_lsmat(obj, fh, delimiter)
 
 
- at register_writer('lsmat', DistanceMatrix)
+ at lsmat.writer(DistanceMatrix)
 def _distance_matrix_to_lsmat(obj, fh, delimiter='\t'):
     _matrix_to_lsmat(obj, fh, delimiter)
 
@@ -165,10 +169,10 @@ def _lsmat_to_matrix(cls, fh, delimiter):
         else:
             raise LSMatFormatError(
                 "Encountered mismatched IDs while parsing the "
-                "dissimilarity matrix file. Found '%s' but expected "
-                "'%s'. Please ensure that the IDs match between the "
+                "dissimilarity matrix file. Found %r but expected "
+                "%r. Please ensure that the IDs match between the "
                 "dissimilarity matrix header (first row) and the row "
-                "labels (first column)." % (row_id, expected_id))
+                "labels (first column)." % (str(row_id), str(expected_id)))
 
     if row_idx != num_ids - 1:
         raise LSMatFormatError("Expected %d row(s) of data, but found %d." %
@@ -197,7 +201,7 @@ def _parse_header(header, delimiter):
 
     if tokens[0]:
         raise LSMatFormatError(
-            "Header must start with delimiter %r." % delimiter)
+            "Header must start with delimiter %r." % str(delimiter))
 
     return [e.strip() for e in tokens[1:]]
 
@@ -216,12 +220,13 @@ def _parse_data(fh, delimiter):
 
 
 def _matrix_to_lsmat(obj, fh, delimiter):
+    delimiter = "%s" % delimiter
     ids = obj.ids
     fh.write(_format_ids(ids, delimiter))
     fh.write('\n')
 
     for id_, vals in zip(ids, obj.data):
-        fh.write(id_)
+        fh.write("%s" % id_)
         fh.write(delimiter)
         fh.write(delimiter.join(np.asarray(vals, dtype=np.str)))
         fh.write('\n')
diff --git a/skbio/io/newick.py b/skbio/io/format/newick.py
similarity index 97%
rename from skbio/io/newick.py
rename to skbio/io/format/newick.py
index 9ed0375..27c5c48 100644
--- a/skbio/io/newick.py
+++ b/skbio/io/format/newick.py
@@ -1,8 +1,8 @@
 """
-Newick format (:mod:`skbio.io.newick`)
-======================================
+Newick format (:mod:`skbio.io.format.newick`)
+=============================================
 
-.. currentmodule:: skbio.io.newick
+.. currentmodule:: skbio.io.format.newick
 
 Newick format (``newick``) stores spanning-trees with weighted edges and node
 names in a minimal file format [1]_. This is useful for representing
@@ -211,6 +211,7 @@ References
 .. [2] http://evolution.genetics.washington.edu/phylip/newicktree.html
 
 """
+
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -219,16 +220,18 @@ References
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
 
 from future.builtins import zip, range
 
-from skbio.io import (register_reader, register_writer, register_sniffer,
-                      NewickFormatError)
+from skbio.io import create_format, NewickFormatError
 from skbio.tree import TreeNode
 
+newick = create_format('newick')
+
 
- at register_sniffer("newick")
+ at newick.sniffer()
 def _newick_sniffer(fh):
     # Strategy:
     #   The following conditions preclude a file from being newick:
@@ -269,7 +272,7 @@ def _newick_sniffer(fh):
     return not empty, {}
 
 
- at register_reader('newick', TreeNode)
+ at newick.reader(TreeNode)
 def _newick_to_tree_node(fh, convert_underscores=True):
     tree_stack = []
     current_depth = 0
@@ -331,7 +334,7 @@ def _newick_to_tree_node(fh, convert_underscores=True):
                             " missing its root.")
 
 
- at register_writer("newick", TreeNode)
+ at newick.writer(TreeNode)
 def _tree_node_to_newick(obj, fh):
     operators = set(",:_;()[]")
     current_depth = 0
@@ -354,7 +357,7 @@ def _tree_node_to_newick(obj, fh):
             # an empty string as a label in Newick. Therefore, both None and ''
             # are considered to be the absence of a label.
             if node.name:
-                escaped = node.name.replace("'", "''")
+                escaped = "%s" % node.name.replace("'", "''")
                 if any(t in operators for t in node.name):
                     fh.write("'")
                     fh.write(escaped)
@@ -363,7 +366,7 @@ def _tree_node_to_newick(obj, fh):
                     fh.write(escaped.replace(" ", "_"))
             if node.length is not None:
                 fh.write(':')
-                fh.write(str(node.length))
+                fh.write("%s" % node.length)
             if nodes_left and nodes_left[-1][1] == current_depth:
                 fh.write(',')
 
diff --git a/skbio/io/ordination.py b/skbio/io/format/ordination.py
similarity index 96%
rename from skbio/io/ordination.py
rename to skbio/io/format/ordination.py
index 28c5743..422a29c 100644
--- a/skbio/io/ordination.py
+++ b/skbio/io/format/ordination.py
@@ -1,8 +1,8 @@
 r"""
-Ordination results format (:mod:`skbio.io.ordination`)
-======================================================
+Ordination results format (:mod:`skbio.io.format.ordination`)
+=============================================================
 
-.. currentmodule:: skbio.io.ordination
+.. currentmodule:: skbio.io.format.ordination
 
 The ordination results file format (``ordination``) stores the results of an
 ordination method in a human-readable, text-based format. The format supports
@@ -177,6 +177,7 @@ Load the ordination results from the file:
 >>> ord_res = OrdinationResults.read(or_f)
 
 """
+
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -185,17 +186,19 @@ Load the ordination results from the file:
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
 from future.builtins import zip
 
 import numpy as np
 
 from skbio.stats.ordination import OrdinationResults
-from skbio.io import (register_reader, register_writer, register_sniffer,
-                      OrdinationFormatError)
+from skbio.io import create_format, OrdinationFormatError
+
+ordination = create_format('ordination')
 
 
- at register_sniffer('ordination')
+ at ordination.sniffer()
 def _ordination_sniffer(fh):
     # Smells an ordination file if *all* of the following lines are present
     # *from the beginning* of the file:
@@ -217,7 +220,7 @@ def _ordination_sniffer(fh):
     return False, {}
 
 
- at register_reader('ordination', OrdinationResults)
+ at ordination.reader(OrdinationResults)
 def _ordination_to_ordination_results(fh):
     eigvals = _parse_vector_section(fh, 'Eigvals')
     if eigvals is None:
@@ -362,7 +365,7 @@ def _parse_array_section(fh, header_id, has_ids=True):
     return data, ids
 
 
- at register_writer('ordination', OrdinationResults)
+ at ordination.writer(OrdinationResults)
 def _ordination_results_to_ordination(obj, fh):
     _write_vector_section(fh, 'Eigvals', obj.eigvals)
     _write_vector_section(fh, 'Proportion explained', obj.proportion_explained)
diff --git a/skbio/io/phylip.py b/skbio/io/format/phylip.py
similarity index 90%
rename from skbio/io/phylip.py
rename to skbio/io/format/phylip.py
index 3954cc6..197f116 100644
--- a/skbio/io/phylip.py
+++ b/skbio/io/format/phylip.py
@@ -1,8 +1,8 @@
 """
-PHYLIP multiple sequence alignment format (:mod:`skbio.io.phylip`)
-==================================================================
+PHYLIP multiple sequence alignment format (:mod:`skbio.io.format.phylip`)
+=========================================================================
 
-.. currentmodule:: skbio.io.phylip
+.. currentmodule:: skbio.io.format.phylip
 
 The PHYLIP file format stores a multiple sequence alignment. The format was
 originally defined and used in Joe Felsenstein's PHYLIP package [1]_, and has
@@ -123,9 +123,9 @@ Examples
 Let's create an alignment with three DNA sequences of equal length:
 
 >>> from skbio import Alignment, DNA
->>> seqs = [DNA('ACCGTTGTA-GTAGCT', id='seq1'),
-...         DNA('A--GTCGAA-GTACCT', id='sequence-2'),
-...         DNA('AGAGTTGAAGGTATCT', id='3')]
+>>> seqs = [DNA('ACCGTTGTA-GTAGCT', metadata={'id':'seq1'}),
+...         DNA('A--GTCGAA-GTACCT', metadata={'id':'sequence-2'}),
+...         DNA('AGAGTTGAAGGTATCT', metadata={'id':'3'})]
 >>> aln = Alignment(seqs)
 >>> aln
 <Alignment: n=3; mean +/- std length=16.00 +/- 0.00>
@@ -133,10 +133,9 @@ Let's create an alignment with three DNA sequences of equal length:
 Now let's write the alignment to file in PHYLIP format, and take a look at the
 output:
 
->>> from StringIO import StringIO
+>>> from io import StringIO
 >>> fh = StringIO()
->>> aln.write(fh, format='phylip')
->>> print(fh.getvalue())
+>>> print(aln.write(fh, format='phylip').getvalue())
 3 16
 seq1      ACCGTTGTA- GTAGCT
 sequence-2A--GTCGAA- GTACCT
@@ -152,9 +151,9 @@ width column.
 If the sequence IDs in an alignment surpass the 10-character limit, an error
 will be raised when we try to write a PHYLIP file:
 
->>> long_id_seqs = [DNA('ACCGT', id='seq1'),
-...                 DNA('A--GT', id='long-sequence-2'),
-...                 DNA('AGAGT', id='seq3')]
+>>> long_id_seqs = [DNA('ACCGT', metadata={'id':'seq1'}),
+...                 DNA('A--GT', metadata={'id':'long-sequence-2'}),
+...                 DNA('AGAGT', metadata={'id':'seq3'})]
 >>> long_id_aln = Alignment(long_id_seqs)
 >>> fh = StringIO()
 >>> long_id_aln.write(fh, format='phylip')
@@ -178,8 +177,7 @@ remap each of the IDs to integer-based IDs:
 We can now write the new alignment in PHYLIP format:
 
 >>> fh = StringIO()
->>> short_id_aln.write(fh, format='phylip')
->>> print(fh.getvalue())
+>>> print(short_id_aln.write(fh, format='phylip').getvalue())
 3 5
 1         ACCGT
 2         A--GT
@@ -197,6 +195,7 @@ References
 .. [5] http://www.bioperl.org/wiki/PHYLIP_multiple_alignment_format
 
 """
+
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -205,14 +204,17 @@ References
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
 
 from skbio.alignment import Alignment
-from skbio.io import register_writer, PhylipFormatError
-from skbio.io._base import _chunk_str
+from skbio.io import create_format, PhylipFormatError
+from skbio.util._misc import chunk_str
+
+phylip = create_format('phylip')
 
 
- at register_writer('phylip', Alignment)
+ at phylip.writer(Alignment)
 def _alignment_to_phylip(obj, fh):
 
     if obj.is_empty():
@@ -241,5 +243,5 @@ def _alignment_to_phylip(obj, fh):
 
     fmt = '{0:%d}{1}\n' % chunk_size
     for seq in obj:
-        chunked_seq = _chunk_str(str(seq), chunk_size, ' ')
-        fh.write(fmt.format(seq.id, chunked_seq))
+        chunked_seq = chunk_str(str(seq), chunk_size, ' ')
+        fh.write(fmt.format(seq.metadata['id'], chunked_seq))
diff --git a/skbio/io/qseq.py b/skbio/io/format/qseq.py
similarity index 73%
rename from skbio/io/qseq.py
rename to skbio/io/format/qseq.py
index e8a67c0..dc4d472 100644
--- a/skbio/io/qseq.py
+++ b/skbio/io/format/qseq.py
@@ -1,8 +1,8 @@
 r"""
-QSeq format (:mod:`skbio.io.qseq`)
-==================================
+QSeq format (:mod:`skbio.io.format.qseq`)
+=========================================
 
-.. currentmodule:: skbio.io.qseq
+.. currentmodule:: skbio.io.format.qseq
 
 The QSeq format (`qseq`) is a record-based, plain text output format produced
 by some DNA sequencers for storing biological sequence data, quality scores,
@@ -15,19 +15,17 @@ Format Support
 +------+------+---------------------------------------------------------------+
 |Reader|Writer|                          Object Class                         |
 +======+======+===============================================================+
-|Yes   |No    |generator of :mod:`skbio.sequence.BiologicalSequence` objects  |
+|Yes   |No    |generator of :mod:`skbio.sequence.Sequence` objects            |
 +------+------+---------------------------------------------------------------+
 |Yes   |No    |:mod:`skbio.alignment.SequenceCollection`                      |
 +------+------+---------------------------------------------------------------+
-|Yes   |No    |:mod:`skbio.sequence.BiologicalSequence`                       |
+|Yes   |No    |:mod:`skbio.sequence.Sequence`                                 |
 +------+------+---------------------------------------------------------------+
-|Yes   |No    |:mod:`skbio.sequence.NucleotideSequence`                       |
+|Yes   |No    |:mod:`skbio.sequence.DNA`                                      |
 +------+------+---------------------------------------------------------------+
-|Yes   |No    |:mod:`skbio.sequence.DNASequence`                              |
+|Yes   |No    |:mod:`skbio.sequence.RNA`                                      |
 +------+------+---------------------------------------------------------------+
-|Yes   |No    |:mod:`skbio.sequence.RNASequence`                              |
-+------+------+---------------------------------------------------------------+
-|Yes   |No    |:mod:`skbio.sequence.ProteinSequence`                          |
+|Yes   |No    |:mod:`skbio.sequence.Protein`                                  |
 +------+------+---------------------------------------------------------------+
 
 Format Specification
@@ -49,19 +47,27 @@ A QSeq file is composed of single-line records, delimited by tabs. There are
 
 For more details please refer to the CASAVA documentation [1]_.
 
+.. note:: When a QSeq file is read into a scikit-bio object, the object's
+   `metadata` attribute is automatically populated with data corresponding
+   to the names above.
+
+.. note:: `lowercase` functionality is supported when reading QSeq files,
+   depending on the object type the file is being read into. Refer to
+   specific object constructor documentation for details.
+
 .. note:: scikit-bio allows for the filter field to be ommitted, but it is not
    clear if this is part of the original format specification.
 
 Format Parameters
 -----------------
 The following parameters are the same as in FASTQ format
-(:mod:`skbio.io.fastq`):
+(:mod:`skbio.io.format.fastq`):
 
 - ``variant``: see ``variant`` parameter in FASTQ format
 - ``phred_offset``: see ``phred_offset`` parameter in FASTQ format
 
 The following additional parameters are the same as in FASTA format
-(:mod:`skbio.io.fasta`):
+(:mod:`skbio.io.format.fasta`):
 
 - ``constructor``: see ``constructor`` parameter in FASTA format
 - ``seq_num``: see ``seq_num`` parameter in FASTA format
@@ -108,6 +114,7 @@ References
 .. [1] http://biowulf.nih.gov/apps/CASAVA_UG_15011196B.pdf
 
 """
+
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -116,22 +123,24 @@ References
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
 
 from future.builtins import zip, range
 
-from skbio.io import register_reader, register_sniffer, QSeqFormatError
-from skbio.io._base import _decode_qual_to_phred, _get_nth_sequence
+from skbio.io import create_format, QSeqFormatError
+from skbio.io.format._base import _decode_qual_to_phred, _get_nth_sequence
 from skbio.alignment import SequenceCollection
-from skbio.sequence import (BiologicalSequence, NucleotideSequence,
-                            DNASequence, RNASequence, ProteinSequence)
+from skbio.sequence import Sequence, DNA, RNA, Protein
 
 _default_phred_offset = None
 _default_variant = None
 _will_filter = True
 
+qseq = create_format('qseq')
 
- at register_sniffer('qseq')
+
+ at qseq.sniffer()
 def _qseq_sniffer(fh):
     empty = True
     try:
@@ -143,10 +152,10 @@ def _qseq_sniffer(fh):
         return False, {}
 
 
- at register_reader('qseq')
-def _qseq_to_generator(fh, constructor=BiologicalSequence, filter=_will_filter,
+ at qseq.reader(None)
+def _qseq_to_generator(fh, constructor=Sequence, filter=_will_filter,
                        phred_offset=_default_phred_offset,
-                       variant=_default_variant):
+                       variant=_default_variant, **kwargs):
     for line in fh:
         (machine_name, run, lane, tile, x, y, index, read, seq, raw_qual,
          filtered) = _record_parser(line)
@@ -154,11 +163,21 @@ def _qseq_to_generator(fh, constructor=BiologicalSequence, filter=_will_filter,
             phred = _decode_qual_to_phred(raw_qual, variant, phred_offset)
             seq_id = '%s_%s:%s:%s:%s:%s#%s/%s' % (
                 machine_name, run, lane, tile, x, y, index, read)
-            yield constructor(seq, quality=phred, id=seq_id)
-
-
- at register_reader('qseq', SequenceCollection)
-def _qseq_to_sequence_collection(fh, constructor=BiologicalSequence,
+            yield constructor(seq, metadata={'id': seq_id,
+                                             'machine_name': machine_name,
+                                             'run_number': int(run),
+                                             'lane_number': int(lane),
+                                             'tile_number': int(tile),
+                                             'x': int(x),
+                                             'y': int(y),
+                                             'index': int(index),
+                                             'read_number': int(read)},
+                              positional_metadata={'quality': phred},
+                              **kwargs)
+
+
+ at qseq.reader(SequenceCollection)
+def _qseq_to_sequence_collection(fh, constructor=Sequence,
                                  filter=_will_filter,
                                  phred_offset=_default_phred_offset,
                                  variant=_default_variant):
@@ -167,49 +186,43 @@ def _qseq_to_sequence_collection(fh, constructor=BiologicalSequence,
         variant=variant)))
 
 
- at register_reader('qseq', BiologicalSequence)
+ at qseq.reader(Sequence)
 def _qseq_to_biological_sequence(fh, seq_num=1,
                                  phred_offset=_default_phred_offset,
                                  variant=_default_variant):
     return _get_nth_sequence(_qseq_to_generator(fh, filter=False,
                              phred_offset=phred_offset, variant=variant,
-                             constructor=BiologicalSequence), seq_num)
-
-
- at register_reader('qseq', NucleotideSequence)
-def _qseq_to_nucleotide_sequence(fh, seq_num=1,
-                                 phred_offset=_default_phred_offset,
-                                 variant=_default_variant):
-    return _get_nth_sequence(_qseq_to_generator(fh, filter=False,
-                             phred_offset=phred_offset, variant=variant,
-                             constructor=NucleotideSequence), seq_num)
+                             constructor=Sequence), seq_num)
 
 
- at register_reader('qseq', DNASequence)
+ at qseq.reader(DNA)
 def _qseq_to_dna_sequence(fh, seq_num=1,
                           phred_offset=_default_phred_offset,
-                          variant=_default_variant):
+                          variant=_default_variant, **kwargs):
     return _get_nth_sequence(_qseq_to_generator(fh, filter=False,
                              phred_offset=phred_offset, variant=variant,
-                             constructor=DNASequence), seq_num)
+                             constructor=DNA, **kwargs),
+                             seq_num)
 
 
- at register_reader('qseq', RNASequence)
+ at qseq.reader(RNA)
 def _qseq_to_rna_sequence(fh, seq_num=1,
                           phred_offset=_default_phred_offset,
-                          variant=_default_variant):
+                          variant=_default_variant, **kwargs):
     return _get_nth_sequence(_qseq_to_generator(fh, filter=False,
                              phred_offset=phred_offset, variant=variant,
-                             constructor=RNASequence), seq_num)
+                             constructor=RNA, **kwargs),
+                             seq_num)
 
 
- at register_reader('qseq', ProteinSequence)
+ at qseq.reader(Protein)
 def _qseq_to_protein_sequence(fh, seq_num=1,
                               phred_offset=_default_phred_offset,
-                              variant=_default_variant):
+                              variant=_default_variant, **kwargs):
     return _get_nth_sequence(_qseq_to_generator(fh, filter=False,
                              phred_offset=phred_offset, variant=variant,
-                             constructor=ProteinSequence), seq_num)
+                             constructor=Protein, **kwargs),
+                             seq_num)
 
 
 def _record_parser(line):
diff --git a/skbio/format/sequences/tests/__init__.py b/skbio/io/format/tests/__init__.py
similarity index 84%
rename from skbio/format/sequences/tests/__init__.py
rename to skbio/io/format/tests/__init__.py
index c99682c..3fe3dc6 100644
--- a/skbio/format/sequences/tests/__init__.py
+++ b/skbio/io/format/tests/__init__.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -7,3 +5,5 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
diff --git a/skbio/io/tests/data/empty b/skbio/io/format/tests/data/empty
similarity index 100%
rename from skbio/io/tests/data/empty
rename to skbio/io/format/tests/data/empty
diff --git a/skbio/io/tests/data/error_diff_ids.fastq b/skbio/io/format/tests/data/error_diff_ids.fastq
similarity index 100%
rename from skbio/io/tests/data/error_diff_ids.fastq
rename to skbio/io/format/tests/data/error_diff_ids.fastq
diff --git a/skbio/io/tests/data/error_double_qual.fastq b/skbio/io/format/tests/data/error_double_qual.fastq
similarity index 100%
rename from skbio/io/tests/data/error_double_qual.fastq
rename to skbio/io/format/tests/data/error_double_qual.fastq
diff --git a/skbio/io/tests/data/error_double_seq.fastq b/skbio/io/format/tests/data/error_double_seq.fastq
similarity index 100%
rename from skbio/io/tests/data/error_double_seq.fastq
rename to skbio/io/format/tests/data/error_double_seq.fastq
diff --git a/skbio/io/tests/data/error_long_qual.fastq b/skbio/io/format/tests/data/error_long_qual.fastq
similarity index 100%
rename from skbio/io/tests/data/error_long_qual.fastq
rename to skbio/io/format/tests/data/error_long_qual.fastq
diff --git a/skbio/io/tests/data/error_no_qual.fastq b/skbio/io/format/tests/data/error_no_qual.fastq
similarity index 100%
rename from skbio/io/tests/data/error_no_qual.fastq
rename to skbio/io/format/tests/data/error_no_qual.fastq
diff --git a/skbio/io/tests/data/error_qual_del.fastq b/skbio/io/format/tests/data/error_qual_del.fastq
similarity index 100%
rename from skbio/io/tests/data/error_qual_del.fastq
rename to skbio/io/format/tests/data/error_qual_del.fastq
diff --git a/skbio/io/tests/data/error_qual_escape.fastq b/skbio/io/format/tests/data/error_qual_escape.fastq
similarity index 100%
rename from skbio/io/tests/data/error_qual_escape.fastq
rename to skbio/io/format/tests/data/error_qual_escape.fastq
diff --git a/skbio/io/tests/data/error_qual_null.fastq b/skbio/io/format/tests/data/error_qual_null.fastq
similarity index 100%
rename from skbio/io/tests/data/error_qual_null.fastq
rename to skbio/io/format/tests/data/error_qual_null.fastq
diff --git a/skbio/io/tests/data/error_qual_space.fastq b/skbio/io/format/tests/data/error_qual_space.fastq
similarity index 100%
rename from skbio/io/tests/data/error_qual_space.fastq
rename to skbio/io/format/tests/data/error_qual_space.fastq
diff --git a/skbio/io/tests/data/error_qual_tab.fastq b/skbio/io/format/tests/data/error_qual_tab.fastq
similarity index 100%
rename from skbio/io/tests/data/error_qual_tab.fastq
rename to skbio/io/format/tests/data/error_qual_tab.fastq
diff --git a/skbio/io/tests/data/error_qual_unit_sep.fastq b/skbio/io/format/tests/data/error_qual_unit_sep.fastq
similarity index 100%
rename from skbio/io/tests/data/error_qual_unit_sep.fastq
rename to skbio/io/format/tests/data/error_qual_unit_sep.fastq
diff --git a/skbio/io/tests/data/error_qual_vtab.fastq b/skbio/io/format/tests/data/error_qual_vtab.fastq
similarity index 100%
rename from skbio/io/tests/data/error_qual_vtab.fastq
rename to skbio/io/format/tests/data/error_qual_vtab.fastq
diff --git a/skbio/io/tests/data/error_short_qual.fastq b/skbio/io/format/tests/data/error_short_qual.fastq
similarity index 100%
rename from skbio/io/tests/data/error_short_qual.fastq
rename to skbio/io/format/tests/data/error_short_qual.fastq
diff --git a/skbio/io/tests/data/error_spaces.fastq b/skbio/io/format/tests/data/error_spaces.fastq
similarity index 100%
rename from skbio/io/tests/data/error_spaces.fastq
rename to skbio/io/format/tests/data/error_spaces.fastq
diff --git a/skbio/io/tests/data/error_tabs.fastq b/skbio/io/format/tests/data/error_tabs.fastq
similarity index 100%
rename from skbio/io/tests/data/error_tabs.fastq
rename to skbio/io/format/tests/data/error_tabs.fastq
diff --git a/skbio/io/tests/data/error_trunc_at_plus.fastq b/skbio/io/format/tests/data/error_trunc_at_plus.fastq
similarity index 100%
rename from skbio/io/tests/data/error_trunc_at_plus.fastq
rename to skbio/io/format/tests/data/error_trunc_at_plus.fastq
diff --git a/skbio/io/tests/data/error_trunc_at_qual.fastq b/skbio/io/format/tests/data/error_trunc_at_qual.fastq
similarity index 100%
rename from skbio/io/tests/data/error_trunc_at_qual.fastq
rename to skbio/io/format/tests/data/error_trunc_at_qual.fastq
diff --git a/skbio/io/tests/data/error_trunc_at_seq.fastq b/skbio/io/format/tests/data/error_trunc_at_seq.fastq
similarity index 100%
rename from skbio/io/tests/data/error_trunc_at_seq.fastq
rename to skbio/io/format/tests/data/error_trunc_at_seq.fastq
diff --git a/skbio/io/tests/data/error_trunc_in_plus.fastq b/skbio/io/format/tests/data/error_trunc_in_plus.fastq
similarity index 100%
rename from skbio/io/tests/data/error_trunc_in_plus.fastq
rename to skbio/io/format/tests/data/error_trunc_in_plus.fastq
diff --git a/skbio/io/tests/data/error_trunc_in_qual.fastq b/skbio/io/format/tests/data/error_trunc_in_qual.fastq
similarity index 100%
rename from skbio/io/tests/data/error_trunc_in_qual.fastq
rename to skbio/io/format/tests/data/error_trunc_in_qual.fastq
diff --git a/skbio/io/tests/data/error_trunc_in_seq.fastq b/skbio/io/format/tests/data/error_trunc_in_seq.fastq
similarity index 100%
rename from skbio/io/tests/data/error_trunc_in_seq.fastq
rename to skbio/io/format/tests/data/error_trunc_in_seq.fastq
diff --git a/skbio/io/tests/data/error_trunc_in_title.fastq b/skbio/io/format/tests/data/error_trunc_in_title.fastq
similarity index 100%
rename from skbio/io/tests/data/error_trunc_in_title.fastq
rename to skbio/io/format/tests/data/error_trunc_in_title.fastq
diff --git a/skbio/io/tests/data/fasta_10_seqs b/skbio/io/format/tests/data/fasta_10_seqs
similarity index 100%
rename from skbio/io/tests/data/fasta_10_seqs
rename to skbio/io/format/tests/data/fasta_10_seqs
diff --git a/skbio/io/tests/data/fasta_3_seqs_defaults b/skbio/io/format/tests/data/fasta_3_seqs_defaults
similarity index 100%
rename from skbio/io/tests/data/fasta_3_seqs_defaults
rename to skbio/io/format/tests/data/fasta_3_seqs_defaults
diff --git a/skbio/io/tests/data/fasta_3_seqs_non_defaults b/skbio/io/format/tests/data/fasta_3_seqs_non_defaults
similarity index 100%
rename from skbio/io/tests/data/fasta_3_seqs_non_defaults
rename to skbio/io/format/tests/data/fasta_3_seqs_non_defaults
diff --git a/skbio/io/tests/data/fasta_multi_seq b/skbio/io/format/tests/data/fasta_5_blanks_start_of_file
similarity index 93%
copy from skbio/io/tests/data/fasta_multi_seq
copy to skbio/io/format/tests/data/fasta_5_blanks_start_of_file
index 906b765..c5b933c 100644
--- a/skbio/io/tests/data/fasta_multi_seq
+++ b/skbio/io/format/tests/data/fasta_5_blanks_start_of_file
@@ -1,3 +1,8 @@
+
+
+
+
+
 >seq1 desc1
 ACGT-acgt.
 >_____seq__2_
@@ -5,8 +10,6 @@ A
 > desc3
 AACGGuA
 >
-AcGtUTu
->
 ACGTTGCAccGG
 >
 ACGUU
diff --git a/skbio/io/tests/data/fasta_multi_seq b/skbio/io/format/tests/data/fasta_5_ws_lines_start_of_file
similarity index 70%
copy from skbio/io/tests/data/fasta_multi_seq
copy to skbio/io/format/tests/data/fasta_5_ws_lines_start_of_file
index 906b765..fa7dc9a 100644
--- a/skbio/io/tests/data/fasta_multi_seq
+++ b/skbio/io/format/tests/data/fasta_5_ws_lines_start_of_file
@@ -1,3 +1,8 @@
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
 >seq1 desc1
 ACGT-acgt.
 >_____seq__2_
@@ -5,8 +10,6 @@ A
 > desc3
 AACGGuA
 >
-AcGtUTu
->
 ACGTTGCAccGG
 >
 ACGUU
diff --git a/skbio/io/tests/data/fasta_multi_seq b/skbio/io/format/tests/data/fasta_6_blanks_start_of_file
similarity index 93%
copy from skbio/io/tests/data/fasta_multi_seq
copy to skbio/io/format/tests/data/fasta_6_blanks_start_of_file
index 906b765..a11a25a 100644
--- a/skbio/io/tests/data/fasta_multi_seq
+++ b/skbio/io/format/tests/data/fasta_6_blanks_start_of_file
@@ -1,3 +1,9 @@
+
+
+
+
+
+
 >seq1 desc1
 ACGT-acgt.
 >_____seq__2_
@@ -5,8 +11,6 @@ A
 > desc3
 AACGGuA
 >
-AcGtUTu
->
 ACGTTGCAccGG
 >
 ACGUU
diff --git a/skbio/io/tests/data/fasta_multi_seq b/skbio/io/format/tests/data/fasta_6_ws_lines_start_of_file
similarity index 66%
copy from skbio/io/tests/data/fasta_multi_seq
copy to skbio/io/format/tests/data/fasta_6_ws_lines_start_of_file
index 906b765..7a1507f 100644
--- a/skbio/io/tests/data/fasta_multi_seq
+++ b/skbio/io/format/tests/data/fasta_6_ws_lines_start_of_file
@@ -1,3 +1,9 @@
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
 >seq1 desc1
 ACGT-acgt.
 >_____seq__2_
@@ -5,8 +11,6 @@ A
 > desc3
 AACGGuA
 >
-AcGtUTu
->
 ACGTTGCAccGG
 >
 ACGUU
diff --git a/skbio/io/tests/data/fasta_multi_seq b/skbio/io/format/tests/data/fasta_blank_lines_between_records
similarity index 92%
copy from skbio/io/tests/data/fasta_multi_seq
copy to skbio/io/format/tests/data/fasta_blank_lines_between_records
index 906b765..4f07766 100644
--- a/skbio/io/tests/data/fasta_multi_seq
+++ b/skbio/io/format/tests/data/fasta_blank_lines_between_records
@@ -4,10 +4,20 @@ ACGT-acgt.
 A
 > desc3
 AACGGuA
->
-AcGtUTu
+
+
+
+
+
+
+
 >
 ACGTTGCAccGG
+
+
+
+
+
 >
 ACGUU
 >proteinseq  detailed description 		with  new  lines   
diff --git a/skbio/io/tests/data/fasta_multi_seq b/skbio/io/format/tests/data/fasta_blanks_end_of_file
similarity index 93%
copy from skbio/io/tests/data/fasta_multi_seq
copy to skbio/io/format/tests/data/fasta_blanks_end_of_file
index 906b765..50e84f2 100644
--- a/skbio/io/tests/data/fasta_multi_seq
+++ b/skbio/io/format/tests/data/fasta_blanks_end_of_file
@@ -5,10 +5,15 @@ A
 > desc3
 AACGGuA
 >
-AcGtUTu
->
 ACGTTGCAccGG
 >
 ACGUU
 >proteinseq  detailed description 		with  new  lines   
 pQqqqPPQQQ
+
+
+
+
+
+
+
diff --git a/skbio/io/tests/data/fasta_description_newline_replacement_empty_str b/skbio/io/format/tests/data/fasta_description_newline_replacement_empty_str
similarity index 100%
rename from skbio/io/tests/data/fasta_description_newline_replacement_empty_str
rename to skbio/io/format/tests/data/fasta_description_newline_replacement_empty_str
diff --git a/skbio/io/tests/data/fasta_description_newline_replacement_multi_char b/skbio/io/format/tests/data/fasta_description_newline_replacement_multi_char
similarity index 100%
rename from skbio/io/tests/data/fasta_description_newline_replacement_multi_char
rename to skbio/io/format/tests/data/fasta_description_newline_replacement_multi_char
diff --git a/skbio/io/tests/data/fasta_description_newline_replacement_none b/skbio/io/format/tests/data/fasta_description_newline_replacement_none
similarity index 100%
rename from skbio/io/tests/data/fasta_description_newline_replacement_none
rename to skbio/io/format/tests/data/fasta_description_newline_replacement_none
diff --git a/skbio/io/tests/data/fasta_id_whitespace_replacement_empty_str b/skbio/io/format/tests/data/fasta_id_whitespace_replacement_empty_str
similarity index 100%
rename from skbio/io/tests/data/fasta_id_whitespace_replacement_empty_str
rename to skbio/io/format/tests/data/fasta_id_whitespace_replacement_empty_str
diff --git a/skbio/io/tests/data/fasta_id_whitespace_replacement_multi_char b/skbio/io/format/tests/data/fasta_id_whitespace_replacement_multi_char
similarity index 100%
rename from skbio/io/tests/data/fasta_id_whitespace_replacement_multi_char
rename to skbio/io/format/tests/data/fasta_id_whitespace_replacement_multi_char
diff --git a/skbio/io/tests/data/fasta_id_whitespace_replacement_none b/skbio/io/format/tests/data/fasta_id_whitespace_replacement_none
similarity index 100%
rename from skbio/io/tests/data/fasta_id_whitespace_replacement_none
rename to skbio/io/format/tests/data/fasta_id_whitespace_replacement_none
diff --git a/skbio/io/tests/data/fasta_invalid_after_10_seqs b/skbio/io/format/tests/data/fasta_invalid_after_10_seqs
similarity index 100%
rename from skbio/io/tests/data/fasta_invalid_after_10_seqs
rename to skbio/io/format/tests/data/fasta_invalid_after_10_seqs
diff --git a/skbio/io/tests/data/fasta_invalid_blank_line b/skbio/io/format/tests/data/fasta_invalid_blank_line_after_header
similarity index 100%
rename from skbio/io/tests/data/fasta_invalid_blank_line
rename to skbio/io/format/tests/data/fasta_invalid_blank_line_after_header
index dfa357d..50dc4de 100644
--- a/skbio/io/tests/data/fasta_invalid_blank_line
+++ b/skbio/io/format/tests/data/fasta_invalid_blank_line_after_header
@@ -1,7 +1,7 @@
 >seq1 desc1
 ACGT
 >seq2 desc2
-AAAAA
 
+AAAAA
 >seq3 desc3
 CCC
diff --git a/skbio/io/tests/data/fasta_invalid_whitespace_only_line b/skbio/io/format/tests/data/fasta_invalid_blank_line_within_sequence
similarity index 89%
copy from skbio/io/tests/data/fasta_invalid_whitespace_only_line
copy to skbio/io/format/tests/data/fasta_invalid_blank_line_within_sequence
index ed70c42..2772254 100644
--- a/skbio/io/tests/data/fasta_invalid_whitespace_only_line
+++ b/skbio/io/format/tests/data/fasta_invalid_blank_line_within_sequence
@@ -1,7 +1,8 @@
 >seq1 desc1
 ACGT
 >seq2 desc2
-AAAAA
+AAA
 		     	   
+AA
 >seq3 desc3
 CCC
diff --git a/skbio/io/tests/data/fasta_invalid_missing_seq_data_middle b/skbio/io/format/tests/data/fasta_invalid_blank_sequence
similarity index 97%
copy from skbio/io/tests/data/fasta_invalid_missing_seq_data_middle
copy to skbio/io/format/tests/data/fasta_invalid_blank_sequence
index 746cfe1..ae1d393 100644
--- a/skbio/io/tests/data/fasta_invalid_missing_seq_data_middle
+++ b/skbio/io/format/tests/data/fasta_invalid_blank_sequence
@@ -1,5 +1,6 @@
 >seq1 desc1
 ACGT
 >seq2 desc2
+
 >seq3 desc3
 CCC
diff --git a/skbio/io/tests/data/fasta_invalid_legacy_format b/skbio/io/format/tests/data/fasta_invalid_legacy_format
similarity index 100%
rename from skbio/io/tests/data/fasta_invalid_legacy_format
rename to skbio/io/format/tests/data/fasta_invalid_legacy_format
diff --git a/skbio/io/tests/data/fasta_invalid_missing_header b/skbio/io/format/tests/data/fasta_invalid_missing_header
similarity index 100%
rename from skbio/io/tests/data/fasta_invalid_missing_header
rename to skbio/io/format/tests/data/fasta_invalid_missing_header
diff --git a/skbio/io/tests/data/fasta_invalid_missing_seq_data_first b/skbio/io/format/tests/data/fasta_invalid_missing_seq_data_first
similarity index 100%
rename from skbio/io/tests/data/fasta_invalid_missing_seq_data_first
rename to skbio/io/format/tests/data/fasta_invalid_missing_seq_data_first
diff --git a/skbio/io/tests/data/fasta_invalid_missing_seq_data_last b/skbio/io/format/tests/data/fasta_invalid_missing_seq_data_last
similarity index 100%
rename from skbio/io/tests/data/fasta_invalid_missing_seq_data_last
rename to skbio/io/format/tests/data/fasta_invalid_missing_seq_data_last
diff --git a/skbio/io/tests/data/fasta_invalid_missing_seq_data_middle b/skbio/io/format/tests/data/fasta_invalid_missing_seq_data_middle
similarity index 100%
rename from skbio/io/tests/data/fasta_invalid_missing_seq_data_middle
rename to skbio/io/format/tests/data/fasta_invalid_missing_seq_data_middle
diff --git a/skbio/io/tests/data/fasta_invalid_whitespace_only_line b/skbio/io/format/tests/data/fasta_invalid_whitespace_line_after_header
similarity index 100%
copy from skbio/io/tests/data/fasta_invalid_whitespace_only_line
copy to skbio/io/format/tests/data/fasta_invalid_whitespace_line_after_header
index ed70c42..6fbdf84 100644
--- a/skbio/io/tests/data/fasta_invalid_whitespace_only_line
+++ b/skbio/io/format/tests/data/fasta_invalid_whitespace_line_after_header
@@ -1,7 +1,7 @@
 >seq1 desc1
 ACGT
 >seq2 desc2
-AAAAA
 		     	   
+AAAAA
 >seq3 desc3
 CCC
diff --git a/skbio/io/tests/data/fasta_invalid_whitespace_only_line b/skbio/io/format/tests/data/fasta_invalid_whitespace_only_line_within_sequence
similarity index 91%
copy from skbio/io/tests/data/fasta_invalid_whitespace_only_line
copy to skbio/io/format/tests/data/fasta_invalid_whitespace_only_line_within_sequence
index ed70c42..160418d 100644
--- a/skbio/io/tests/data/fasta_invalid_whitespace_only_line
+++ b/skbio/io/format/tests/data/fasta_invalid_whitespace_only_line_within_sequence
@@ -3,5 +3,6 @@ ACGT
 >seq2 desc2
 AAAAA
 		     	   
+AAAAA
 >seq3 desc3
 CCC
diff --git a/skbio/io/tests/data/fasta_invalid_whitespace_only_line b/skbio/io/format/tests/data/fasta_invalid_whitespace_only_sequence
similarity index 90%
rename from skbio/io/tests/data/fasta_invalid_whitespace_only_line
rename to skbio/io/format/tests/data/fasta_invalid_whitespace_only_sequence
index ed70c42..72f7eec 100644
--- a/skbio/io/tests/data/fasta_invalid_whitespace_only_line
+++ b/skbio/io/format/tests/data/fasta_invalid_whitespace_only_sequence
@@ -1,7 +1,6 @@
 >seq1 desc1
 ACGT
 >seq2 desc2
-AAAAA
 		     	   
 >seq3 desc3
 CCC
diff --git a/skbio/io/tests/data/fasta_max_width_1 b/skbio/io/format/tests/data/fasta_max_width_1
similarity index 100%
rename from skbio/io/tests/data/fasta_max_width_1
rename to skbio/io/format/tests/data/fasta_max_width_1
diff --git a/skbio/io/tests/data/fasta_max_width_5 b/skbio/io/format/tests/data/fasta_max_width_5
similarity index 93%
rename from skbio/io/tests/data/fasta_max_width_5
rename to skbio/io/format/tests/data/fasta_max_width_5
index f2dad0d..a24d409 100644
--- a/skbio/io/tests/data/fasta_max_width_5
+++ b/skbio/io/format/tests/data/fasta_max_width_5
@@ -7,9 +7,6 @@ A
 AACGG
 uA
 >
-AcGtU
-Tu
->
 ACGTT
 GCAcc
 GG
diff --git a/skbio/io/tests/data/fasta_mixed_qual_scores b/skbio/io/format/tests/data/fasta_mixed_qual_scores
similarity index 100%
rename from skbio/io/tests/data/fasta_mixed_qual_scores
rename to skbio/io/format/tests/data/fasta_mixed_qual_scores
diff --git a/skbio/io/tests/data/fasta_multi_seq b/skbio/io/format/tests/data/fasta_multi_seq
similarity index 93%
copy from skbio/io/tests/data/fasta_multi_seq
copy to skbio/io/format/tests/data/fasta_multi_seq
index 906b765..7b66cfb 100644
--- a/skbio/io/tests/data/fasta_multi_seq
+++ b/skbio/io/format/tests/data/fasta_multi_seq
@@ -5,8 +5,6 @@ A
 > desc3
 AACGGuA
 >
-AcGtUTu
->
 ACGTTGCAccGG
 >
 ACGUU
diff --git a/skbio/io/tests/data/fasta_multi_seq_roundtrip b/skbio/io/format/tests/data/fasta_multi_seq_roundtrip
similarity index 100%
rename from skbio/io/tests/data/fasta_multi_seq_roundtrip
rename to skbio/io/format/tests/data/fasta_multi_seq_roundtrip
diff --git a/skbio/io/tests/data/fasta_prot_seqs_odd_labels b/skbio/io/format/tests/data/fasta_prot_seqs_odd_labels
similarity index 100%
rename from skbio/io/tests/data/fasta_prot_seqs_odd_labels
rename to skbio/io/format/tests/data/fasta_prot_seqs_odd_labels
diff --git a/skbio/io/tests/data/fasta_sequence_collection_different_type b/skbio/io/format/tests/data/fasta_sequence_collection_different_type
similarity index 83%
rename from skbio/io/tests/data/fasta_sequence_collection_different_type
rename to skbio/io/format/tests/data/fasta_sequence_collection_different_type
index 5fb090f..a9477fb 100644
--- a/skbio/io/tests/data/fasta_sequence_collection_different_type
+++ b/skbio/io/format/tests/data/fasta_sequence_collection_different_type
@@ -1,6 +1,6 @@
 > 
-AUG
+aUG
 >rnaseq-1 rnaseq desc 1  
-AUC
+AuC
 >rnaseq-2        rnaseq desc 2
-AUG
+AUg
diff --git a/skbio/io/tests/data/fasta_single_bio_seq_defaults b/skbio/io/format/tests/data/fasta_single_bio_seq_defaults
similarity index 72%
rename from skbio/io/tests/data/fasta_single_bio_seq_defaults
rename to skbio/io/format/tests/data/fasta_single_bio_seq_defaults
index fd6d562..12f1db9 100644
--- a/skbio/io/tests/data/fasta_single_bio_seq_defaults
+++ b/skbio/io/format/tests/data/fasta_single_bio_seq_defaults
@@ -1,2 +1,2 @@
 >f_o_o b a r
-ACGT
+ACgt
diff --git a/skbio/io/tests/data/fasta_single_bio_seq_non_defaults b/skbio/io/format/tests/data/fasta_single_bio_seq_non_defaults
similarity index 80%
rename from skbio/io/tests/data/fasta_single_bio_seq_non_defaults
rename to skbio/io/format/tests/data/fasta_single_bio_seq_non_defaults
index aed57ca..62fa0ad 100644
--- a/skbio/io/tests/data/fasta_single_bio_seq_non_defaults
+++ b/skbio/io/format/tests/data/fasta_single_bio_seq_non_defaults
@@ -1,5 +1,5 @@
 >f-o-o b_a_r
 A
 C
-G
-T
+g
+t
diff --git a/skbio/io/tests/data/fasta_single_dna_seq_defaults b/skbio/io/format/tests/data/fasta_single_dna_seq_defaults
similarity index 72%
rename from skbio/io/tests/data/fasta_single_dna_seq_defaults
rename to skbio/io/format/tests/data/fasta_single_dna_seq_defaults
index 7381787..5876227 100644
--- a/skbio/io/tests/data/fasta_single_dna_seq_defaults
+++ b/skbio/io/format/tests/data/fasta_single_dna_seq_defaults
@@ -1,2 +1,2 @@
 >f_o_o b a r
-TACG
+TAcg
diff --git a/skbio/io/tests/data/fasta_single_dna_seq_non_defaults b/skbio/io/format/tests/data/fasta_single_dna_seq_non_defaults
similarity index 80%
rename from skbio/io/tests/data/fasta_single_dna_seq_non_defaults
rename to skbio/io/format/tests/data/fasta_single_dna_seq_non_defaults
index 3743565..5083c12 100644
--- a/skbio/io/tests/data/fasta_single_dna_seq_non_defaults
+++ b/skbio/io/format/tests/data/fasta_single_dna_seq_non_defaults
@@ -1,5 +1,5 @@
 >f-o-o b_a_r
 T
 A
-C
-G
+c
+g
diff --git a/skbio/io/tests/data/fasta_single_prot_seq_defaults b/skbio/io/format/tests/data/fasta_single_prot_seq_defaults
similarity index 76%
rename from skbio/io/tests/data/fasta_single_prot_seq_defaults
rename to skbio/io/format/tests/data/fasta_single_prot_seq_defaults
index 6ef6b37..ffbba5d 100644
--- a/skbio/io/tests/data/fasta_single_prot_seq_defaults
+++ b/skbio/io/format/tests/data/fasta_single_prot_seq_defaults
@@ -1,2 +1,2 @@
 >f_o_o b a r
-PQQ
+PqQ
diff --git a/skbio/io/tests/data/fasta_single_prot_seq_non_defaults b/skbio/io/format/tests/data/fasta_single_prot_seq_non_defaults
similarity index 89%
rename from skbio/io/tests/data/fasta_single_prot_seq_non_defaults
rename to skbio/io/format/tests/data/fasta_single_prot_seq_non_defaults
index a031313..896dde9 100644
--- a/skbio/io/tests/data/fasta_single_prot_seq_non_defaults
+++ b/skbio/io/format/tests/data/fasta_single_prot_seq_non_defaults
@@ -1,4 +1,4 @@
 >f-o-o b_a_r
 P
-Q
+q
 Q
diff --git a/skbio/io/tests/data/fasta_single_rna_seq_defaults b/skbio/io/format/tests/data/fasta_single_rna_seq_defaults
similarity index 72%
rename from skbio/io/tests/data/fasta_single_rna_seq_defaults
rename to skbio/io/format/tests/data/fasta_single_rna_seq_defaults
index 738358c..2c7a54a 100644
--- a/skbio/io/tests/data/fasta_single_rna_seq_defaults
+++ b/skbio/io/format/tests/data/fasta_single_rna_seq_defaults
@@ -1,2 +1,2 @@
 >f_o_o b a r
-UACG
+uaCG
diff --git a/skbio/io/tests/data/fasta_single_rna_seq_non_defaults b/skbio/io/format/tests/data/fasta_single_rna_seq_non_defaults
similarity index 80%
rename from skbio/io/tests/data/fasta_single_rna_seq_non_defaults
rename to skbio/io/format/tests/data/fasta_single_rna_seq_non_defaults
index a43fd53..927a668 100644
--- a/skbio/io/tests/data/fasta_single_rna_seq_non_defaults
+++ b/skbio/io/format/tests/data/fasta_single_rna_seq_non_defaults
@@ -1,5 +1,5 @@
 >f-o-o b_a_r
-U
-A
+u
+a
 C
 G
diff --git a/skbio/io/tests/data/fasta_single_seq b/skbio/io/format/tests/data/fasta_single_seq
similarity index 100%
rename from skbio/io/tests/data/fasta_single_seq
rename to skbio/io/format/tests/data/fasta_single_seq
diff --git a/skbio/io/tests/data/fasta_multi_seq b/skbio/io/format/tests/data/fasta_ws_lines_between_records
similarity index 52%
copy from skbio/io/tests/data/fasta_multi_seq
copy to skbio/io/format/tests/data/fasta_ws_lines_between_records
index 906b765..8679272 100644
--- a/skbio/io/tests/data/fasta_multi_seq
+++ b/skbio/io/format/tests/data/fasta_ws_lines_between_records
@@ -4,11 +4,20 @@ ACGT-acgt.
 A
 > desc3
 AACGGuA
->
-AcGtUTu
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
 >
 ACGTTGCAccGG
 >
 ACGUU
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
 >proteinseq  detailed description 		with  new  lines   
 pQqqqPPQQQ
diff --git a/skbio/io/tests/data/fasta_multi_seq b/skbio/io/format/tests/data/fasta_ws_lines_end_of_file
similarity index 62%
rename from skbio/io/tests/data/fasta_multi_seq
rename to skbio/io/format/tests/data/fasta_ws_lines_end_of_file
index 906b765..2d18f4a 100644
--- a/skbio/io/tests/data/fasta_multi_seq
+++ b/skbio/io/format/tests/data/fasta_ws_lines_end_of_file
@@ -5,10 +5,16 @@ A
 > desc3
 AACGGuA
 >
-AcGtUTu
->
 ACGTTGCAccGG
 >
 ACGUU
 >proteinseq  detailed description 		with  new  lines   
 pQqqqPPQQQ
+         
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
+     
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_5_blanks_start_of_file
similarity index 94%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_5_blanks_start_of_file
index a2b5187..d99c6aa 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_5_blanks_start_of_file
@@ -1,3 +1,8 @@
+
+
+
+
+
 @foo bar baz
 AACCGG
 +
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_5_ws_lines_start_of_file
similarity index 59%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_5_ws_lines_start_of_file
index a2b5187..081609c 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_5_ws_lines_start_of_file
@@ -1,3 +1,8 @@
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
 @foo bar baz
 AACCGG
 +
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_blank_lines
similarity index 96%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_blank_lines
index a2b5187..64cba24 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_blank_lines
@@ -2,11 +2,14 @@
 AACCGG
 +
 123456
+
 @bar baz foo
 TTGGCC
 +
 876543
+
 @baz foo bar
 GATTTC
 +
 567893
+
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_invalid_blank_after_header
similarity index 98%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_invalid_blank_after_header
index a2b5187..e4fd404 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_invalid_blank_after_header
@@ -1,4 +1,5 @@
 @foo bar baz
+
 AACCGG
 +
 123456
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_invalid_blank_after_plus
similarity index 98%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_invalid_blank_after_plus
index a2b5187..3b5e2a0 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_invalid_blank_after_plus
@@ -1,6 +1,7 @@
 @foo bar baz
 AACCGG
 +
+
 123456
 @bar baz foo
 TTGGCC
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_invalid_blank_after_seq
similarity index 98%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_invalid_blank_after_seq
index a2b5187..ce1c390 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_invalid_blank_after_seq
@@ -1,5 +1,6 @@
 @foo bar baz
 AACCGG
+
 +
 123456
 @bar baz foo
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_invalid_blank_in_seq_at_symbol
similarity index 89%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_invalid_blank_in_seq_at_symbol
index a2b5187..b9ddc50 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_invalid_blank_in_seq_at_symbol
@@ -1,8 +1,10 @@
 @foo bar baz
 AACCGG
 +
-123456
+MNO
+ at QR
 @bar baz foo
+
 TTGGCC
 +
 876543
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_invalid_blank_within_qual
similarity index 89%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_invalid_blank_within_qual
index a2b5187..bcbe90f 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_invalid_blank_within_qual
@@ -1,7 +1,9 @@
 @foo bar baz
 AACCGG
 +
-123456
+MNO
+
+PQR
 @bar baz foo
 TTGGCC
 +
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_invalid_blank_within_seq
similarity index 89%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_invalid_blank_within_seq
index a2b5187..01f9c29 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_invalid_blank_within_seq
@@ -1,5 +1,7 @@
 @foo bar baz
-AACCGG
+AAC
+
+CGG
 +
 123456
 @bar baz foo
diff --git a/skbio/io/tests/data/fastq_invalid_missing_header b/skbio/io/format/tests/data/fastq_invalid_missing_header
similarity index 100%
rename from skbio/io/tests/data/fastq_invalid_missing_header
rename to skbio/io/format/tests/data/fastq_invalid_missing_header
diff --git a/skbio/io/tests/data/fastq_invalid_missing_seq_data b/skbio/io/format/tests/data/fastq_invalid_missing_seq_data
similarity index 100%
rename from skbio/io/tests/data/fastq_invalid_missing_seq_data
rename to skbio/io/format/tests/data/fastq_invalid_missing_seq_data
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_invalid_ws_line_after_header
similarity index 87%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_invalid_ws_line_after_header
index a2b5187..faefe5d 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_invalid_ws_line_after_header
@@ -1,4 +1,5 @@
 @foo bar baz
+		     	   
 AACCGG
 +
 123456
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_invalid_ws_line_after_plus
similarity index 87%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_invalid_ws_line_after_plus
index a2b5187..0b66603 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_invalid_ws_line_after_plus
@@ -1,6 +1,7 @@
 @foo bar baz
 AACCGG
 +
+		     	   
 123456
 @bar baz foo
 TTGGCC
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_invalid_ws_line_after_seq
similarity index 87%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_invalid_ws_line_after_seq
index a2b5187..b4c0a76 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_invalid_ws_line_after_seq
@@ -1,5 +1,6 @@
 @foo bar baz
 AACCGG
+		     	   
 +
 123456
 @bar baz foo
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_invalid_ws_line_within_qual
similarity index 80%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_invalid_ws_line_within_qual
index a2b5187..43616c4 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_invalid_ws_line_within_qual
@@ -1,7 +1,9 @@
 @foo bar baz
 AACCGG
 +
-123456
+MNO
+		     	   
+PQR
 @bar baz foo
 TTGGCC
 +
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_invalid_ws_line_within_seq
similarity index 80%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_invalid_ws_line_within_seq
index a2b5187..111c60d 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_invalid_ws_line_within_seq
@@ -1,5 +1,7 @@
 @foo bar baz
-AACCGG
+AAC
+		     	   
+CGG
 +
 123456
 @bar baz foo
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_multi_blank_between_records
similarity index 91%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_multi_blank_between_records
index a2b5187..b814091 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_multi_blank_between_records
@@ -2,10 +2,18 @@
 AACCGG
 +
 123456
+
+
+
+
 @bar baz foo
 TTGGCC
 +
 876543
+
+
+
+
 @baz foo bar
 GATTTC
 +
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_multi_blank_end_of_file
similarity index 92%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_multi_blank_end_of_file
index a2b5187..ae429cf 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_multi_blank_end_of_file
@@ -10,3 +10,10 @@ TTGGCC
 GATTTC
 +
 567893
+
+
+
+
+
+
+
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_multi_blank_start_of_file
similarity index 93%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_multi_blank_start_of_file
index a2b5187..db4e8bc 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_multi_blank_start_of_file
@@ -1,3 +1,9 @@
+
+
+
+
+
+
 @foo bar baz
 AACCGG
 +
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_multi_seq_sanger
similarity index 100%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_multi_seq_sanger
diff --git a/skbio/io/format/tests/data/fastq_multi_whitespace_stripping b/skbio/io/format/tests/data/fastq_multi_whitespace_stripping
new file mode 100644
index 0000000..447b794
--- /dev/null
+++ b/skbio/io/format/tests/data/fastq_multi_whitespace_stripping
@@ -0,0 +1,14 @@
+		     	   @foo bar baz		     	   
+		     	   AACCGG		     	   
+		     	   +		     	   
+		     	   123456		     	   
+		     	   @bar baz foo		     	   
+		     	   TTGGCC		     	   
+		     	   +		     	   
+		     	   876543		     	   
+ at baz foo bar		     	   
+GATTTC		     	   
++		     	   
+567893		     	   
+		     	   
+		     	   
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_multi_ws_lines_between_records
similarity index 54%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_multi_ws_lines_between_records
index a2b5187..da36ab8 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_multi_ws_lines_between_records
@@ -2,10 +2,16 @@
 AACCGG
 +
 123456
+		     	   
+		     	   
 @bar baz foo
 TTGGCC
 +
 876543
+		     	   
+		     	   
+		     	   
+		     	   
 @baz foo bar
 GATTTC
 +
diff --git a/skbio/io/format/tests/data/fastq_multi_ws_lines_end_of_file b/skbio/io/format/tests/data/fastq_multi_ws_lines_end_of_file
new file mode 100644
index 0000000..ef3475a
--- /dev/null
+++ b/skbio/io/format/tests/data/fastq_multi_ws_lines_end_of_file
@@ -0,0 +1,20 @@
+ at foo bar baz
+AACCGG
++
+123456
+ at bar baz foo
+TTGGCC
++
+876543
+ at baz foo bar
+GATTTC
++
+567893
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_multi_ws_lines_start_of_file
similarity index 54%
copy from skbio/io/tests/data/fastq_multi_seq_sanger
copy to skbio/io/format/tests/data/fastq_multi_ws_lines_start_of_file
index a2b5187..170aed9 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_multi_ws_lines_start_of_file
@@ -1,3 +1,9 @@
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
 @foo bar baz
 AACCGG
 +
diff --git a/skbio/io/tests/data/fastq_single_seq_illumina1.3 b/skbio/io/format/tests/data/fastq_single_seq_illumina1.3
similarity index 93%
rename from skbio/io/tests/data/fastq_single_seq_illumina1.3
rename to skbio/io/format/tests/data/fastq_single_seq_illumina1.3
index f5de950..f81f5a9 100644
--- a/skbio/io/tests/data/fastq_single_seq_illumina1.3
+++ b/skbio/io/format/tests/data/fastq_single_seq_illumina1.3
@@ -1,5 +1,5 @@
 @ 	 bar	 baz  
-A
+a
 C
 G
 T
diff --git a/skbio/io/tests/data/fastq_multi_seq_sanger b/skbio/io/format/tests/data/fastq_whitespace_only_lines
similarity index 70%
rename from skbio/io/tests/data/fastq_multi_seq_sanger
rename to skbio/io/format/tests/data/fastq_whitespace_only_lines
index a2b5187..abf7fb3 100644
--- a/skbio/io/tests/data/fastq_multi_seq_sanger
+++ b/skbio/io/format/tests/data/fastq_whitespace_only_lines
@@ -2,11 +2,14 @@
 AACCGG
 +
 123456
+		     	   
 @bar baz foo
 TTGGCC
 +
 876543
+		     	   
 @baz foo bar
 GATTTC
 +
 567893
+		     	   
diff --git a/skbio/io/tests/data/fastq_wrapping_as_illumina_no_description b/skbio/io/format/tests/data/fastq_wrapping_as_illumina_no_description
similarity index 100%
rename from skbio/io/tests/data/fastq_wrapping_as_illumina_no_description
rename to skbio/io/format/tests/data/fastq_wrapping_as_illumina_no_description
diff --git a/skbio/io/tests/data/fastq_wrapping_as_sanger_no_description b/skbio/io/format/tests/data/fastq_wrapping_as_sanger_no_description
similarity index 100%
rename from skbio/io/tests/data/fastq_wrapping_as_sanger_no_description
rename to skbio/io/format/tests/data/fastq_wrapping_as_sanger_no_description
diff --git a/skbio/io/tests/data/fastq_wrapping_original_sanger_no_description b/skbio/io/format/tests/data/fastq_wrapping_original_sanger_no_description
similarity index 100%
rename from skbio/io/tests/data/fastq_wrapping_original_sanger_no_description
rename to skbio/io/format/tests/data/fastq_wrapping_original_sanger_no_description
diff --git a/skbio/io/tests/data/fastq_writer_illumina1.3_defaults b/skbio/io/format/tests/data/fastq_writer_illumina1.3_defaults
similarity index 77%
rename from skbio/io/tests/data/fastq_writer_illumina1.3_defaults
rename to skbio/io/format/tests/data/fastq_writer_illumina1.3_defaults
index 445cf69..94b9e9e 100644
--- a/skbio/io/tests/data/fastq_writer_illumina1.3_defaults
+++ b/skbio/io/format/tests/data/fastq_writer_illumina1.3_defaults
@@ -1,12 +1,12 @@
 @f_o__o bar  baz
-AACCGG
+AaCcGg
 +
 PQRSTU
 @bar baz foo
-TTGGCC
+TtGgCc
 +
 WVUTSR
 @ba___z foo bar
-GATTTC
+gAtTtC
 +
 TUVWXR
diff --git a/skbio/io/tests/data/fastq_writer_sanger_defaults b/skbio/io/format/tests/data/fastq_writer_sanger_defaults
similarity index 77%
rename from skbio/io/tests/data/fastq_writer_sanger_defaults
rename to skbio/io/format/tests/data/fastq_writer_sanger_defaults
index 14cc74b..e322668 100644
--- a/skbio/io/tests/data/fastq_writer_sanger_defaults
+++ b/skbio/io/format/tests/data/fastq_writer_sanger_defaults
@@ -1,12 +1,12 @@
 @f_o__o bar  baz
-AACCGG
+AaCcGg
 +
 123456
 @bar baz foo
-TTGGCC
+TtGgCc
 +
 876543
 @ba___z foo bar
-GATTTC
+gAtTtC
 +
 567893
diff --git a/skbio/io/tests/data/fastq_writer_sanger_non_defaults b/skbio/io/format/tests/data/fastq_writer_sanger_non_defaults
similarity index 77%
rename from skbio/io/tests/data/fastq_writer_sanger_non_defaults
rename to skbio/io/format/tests/data/fastq_writer_sanger_non_defaults
index 0bb9718..9c93b55 100644
--- a/skbio/io/tests/data/fastq_writer_sanger_non_defaults
+++ b/skbio/io/format/tests/data/fastq_writer_sanger_non_defaults
@@ -1,12 +1,12 @@
 @f%o%%o bar^^baz
-AACCGG
+AaCcGg
 +
 123456
 @bar baz foo
-TTGGCC
+TtGgCc
 +
 876543
 @ba%%%z foo bar
-GATTTC
+gAtTtC
 +
 567893
diff --git a/skbio/io/tests/data/illumina_full_range_as_illumina.fastq b/skbio/io/format/tests/data/illumina_full_range_as_illumina.fastq
similarity index 100%
rename from skbio/io/tests/data/illumina_full_range_as_illumina.fastq
rename to skbio/io/format/tests/data/illumina_full_range_as_illumina.fastq
diff --git a/skbio/io/tests/data/illumina_full_range_as_sanger.fastq b/skbio/io/format/tests/data/illumina_full_range_as_sanger.fastq
similarity index 100%
rename from skbio/io/tests/data/illumina_full_range_as_sanger.fastq
rename to skbio/io/format/tests/data/illumina_full_range_as_sanger.fastq
diff --git a/skbio/io/tests/data/illumina_full_range_original_illumina.fastq b/skbio/io/format/tests/data/illumina_full_range_original_illumina.fastq
similarity index 100%
rename from skbio/io/tests/data/illumina_full_range_original_illumina.fastq
rename to skbio/io/format/tests/data/illumina_full_range_original_illumina.fastq
diff --git a/skbio/io/tests/data/longreads_as_illumina.fastq b/skbio/io/format/tests/data/longreads_as_illumina.fastq
similarity index 100%
rename from skbio/io/tests/data/longreads_as_illumina.fastq
rename to skbio/io/format/tests/data/longreads_as_illumina.fastq
diff --git a/skbio/io/tests/data/longreads_as_sanger.fastq b/skbio/io/format/tests/data/longreads_as_sanger.fastq
similarity index 100%
rename from skbio/io/tests/data/longreads_as_sanger.fastq
rename to skbio/io/format/tests/data/longreads_as_sanger.fastq
diff --git a/skbio/io/tests/data/longreads_original_sanger.fastq b/skbio/io/format/tests/data/longreads_original_sanger.fastq
similarity index 100%
rename from skbio/io/tests/data/longreads_original_sanger.fastq
rename to skbio/io/format/tests/data/longreads_original_sanger.fastq
diff --git a/skbio/io/tests/data/misc_dna_as_illumina.fastq b/skbio/io/format/tests/data/misc_dna_as_illumina.fastq
similarity index 100%
rename from skbio/io/tests/data/misc_dna_as_illumina.fastq
rename to skbio/io/format/tests/data/misc_dna_as_illumina.fastq
diff --git a/skbio/io/tests/data/misc_dna_as_sanger.fastq b/skbio/io/format/tests/data/misc_dna_as_sanger.fastq
similarity index 100%
rename from skbio/io/tests/data/misc_dna_as_sanger.fastq
rename to skbio/io/format/tests/data/misc_dna_as_sanger.fastq
diff --git a/skbio/io/tests/data/misc_dna_original_sanger.fastq b/skbio/io/format/tests/data/misc_dna_original_sanger.fastq
similarity index 100%
rename from skbio/io/tests/data/misc_dna_original_sanger.fastq
rename to skbio/io/format/tests/data/misc_dna_original_sanger.fastq
diff --git a/skbio/io/tests/data/misc_rna_as_illumina.fastq b/skbio/io/format/tests/data/misc_rna_as_illumina.fastq
similarity index 100%
rename from skbio/io/tests/data/misc_rna_as_illumina.fastq
rename to skbio/io/format/tests/data/misc_rna_as_illumina.fastq
diff --git a/skbio/io/tests/data/misc_rna_as_sanger.fastq b/skbio/io/format/tests/data/misc_rna_as_sanger.fastq
similarity index 100%
rename from skbio/io/tests/data/misc_rna_as_sanger.fastq
rename to skbio/io/format/tests/data/misc_rna_as_sanger.fastq
diff --git a/skbio/io/tests/data/misc_rna_original_sanger.fastq b/skbio/io/format/tests/data/misc_rna_original_sanger.fastq
similarity index 100%
rename from skbio/io/tests/data/misc_rna_original_sanger.fastq
rename to skbio/io/format/tests/data/misc_rna_original_sanger.fastq
diff --git a/skbio/io/tests/data/ordination_L&L_CA_data_scores b/skbio/io/format/tests/data/ordination_L&L_CA_data_scores
similarity index 100%
rename from skbio/io/tests/data/ordination_L&L_CA_data_scores
rename to skbio/io/format/tests/data/ordination_L&L_CA_data_scores
diff --git a/skbio/io/tests/data/ordination_PCoA_sample_data_3_scores b/skbio/io/format/tests/data/ordination_PCoA_sample_data_3_scores
similarity index 100%
rename from skbio/io/tests/data/ordination_PCoA_sample_data_3_scores
rename to skbio/io/format/tests/data/ordination_PCoA_sample_data_3_scores
diff --git a/skbio/io/tests/data/ordination_error1 b/skbio/io/format/tests/data/ordination_error1
similarity index 100%
rename from skbio/io/tests/data/ordination_error1
rename to skbio/io/format/tests/data/ordination_error1
diff --git a/skbio/io/tests/data/ordination_error10 b/skbio/io/format/tests/data/ordination_error10
similarity index 100%
rename from skbio/io/tests/data/ordination_error10
rename to skbio/io/format/tests/data/ordination_error10
diff --git a/skbio/io/tests/data/ordination_error11 b/skbio/io/format/tests/data/ordination_error11
similarity index 100%
rename from skbio/io/tests/data/ordination_error11
rename to skbio/io/format/tests/data/ordination_error11
diff --git a/skbio/io/tests/data/ordination_error12 b/skbio/io/format/tests/data/ordination_error12
similarity index 100%
rename from skbio/io/tests/data/ordination_error12
rename to skbio/io/format/tests/data/ordination_error12
diff --git a/skbio/io/tests/data/ordination_error13 b/skbio/io/format/tests/data/ordination_error13
similarity index 100%
rename from skbio/io/tests/data/ordination_error13
rename to skbio/io/format/tests/data/ordination_error13
diff --git a/skbio/io/tests/data/ordination_error14 b/skbio/io/format/tests/data/ordination_error14
similarity index 100%
rename from skbio/io/tests/data/ordination_error14
rename to skbio/io/format/tests/data/ordination_error14
diff --git a/skbio/io/tests/data/ordination_error15 b/skbio/io/format/tests/data/ordination_error15
similarity index 100%
rename from skbio/io/tests/data/ordination_error15
rename to skbio/io/format/tests/data/ordination_error15
diff --git a/skbio/io/tests/data/ordination_error16 b/skbio/io/format/tests/data/ordination_error16
similarity index 100%
rename from skbio/io/tests/data/ordination_error16
rename to skbio/io/format/tests/data/ordination_error16
diff --git a/skbio/io/tests/data/ordination_error17 b/skbio/io/format/tests/data/ordination_error17
similarity index 100%
rename from skbio/io/tests/data/ordination_error17
rename to skbio/io/format/tests/data/ordination_error17
diff --git a/skbio/io/tests/data/ordination_error18 b/skbio/io/format/tests/data/ordination_error18
similarity index 100%
rename from skbio/io/tests/data/ordination_error18
rename to skbio/io/format/tests/data/ordination_error18
diff --git a/skbio/io/tests/data/ordination_error19 b/skbio/io/format/tests/data/ordination_error19
similarity index 100%
rename from skbio/io/tests/data/ordination_error19
rename to skbio/io/format/tests/data/ordination_error19
diff --git a/skbio/io/tests/data/ordination_error2 b/skbio/io/format/tests/data/ordination_error2
similarity index 100%
rename from skbio/io/tests/data/ordination_error2
rename to skbio/io/format/tests/data/ordination_error2
diff --git a/skbio/io/tests/data/ordination_error20 b/skbio/io/format/tests/data/ordination_error20
similarity index 100%
rename from skbio/io/tests/data/ordination_error20
rename to skbio/io/format/tests/data/ordination_error20
diff --git a/skbio/io/tests/data/ordination_error21 b/skbio/io/format/tests/data/ordination_error21
similarity index 100%
rename from skbio/io/tests/data/ordination_error21
rename to skbio/io/format/tests/data/ordination_error21
diff --git a/skbio/io/tests/data/ordination_error22 b/skbio/io/format/tests/data/ordination_error22
similarity index 100%
rename from skbio/io/tests/data/ordination_error22
rename to skbio/io/format/tests/data/ordination_error22
diff --git a/skbio/io/tests/data/ordination_error23 b/skbio/io/format/tests/data/ordination_error23
similarity index 100%
rename from skbio/io/tests/data/ordination_error23
rename to skbio/io/format/tests/data/ordination_error23
diff --git a/skbio/io/tests/data/ordination_error24 b/skbio/io/format/tests/data/ordination_error24
similarity index 100%
rename from skbio/io/tests/data/ordination_error24
rename to skbio/io/format/tests/data/ordination_error24
diff --git a/skbio/io/tests/data/ordination_error3 b/skbio/io/format/tests/data/ordination_error3
similarity index 100%
rename from skbio/io/tests/data/ordination_error3
rename to skbio/io/format/tests/data/ordination_error3
diff --git a/skbio/io/tests/data/ordination_error4 b/skbio/io/format/tests/data/ordination_error4
similarity index 100%
rename from skbio/io/tests/data/ordination_error4
rename to skbio/io/format/tests/data/ordination_error4
diff --git a/skbio/io/tests/data/ordination_error5 b/skbio/io/format/tests/data/ordination_error5
similarity index 100%
rename from skbio/io/tests/data/ordination_error5
rename to skbio/io/format/tests/data/ordination_error5
diff --git a/skbio/io/tests/data/ordination_error6 b/skbio/io/format/tests/data/ordination_error6
similarity index 100%
rename from skbio/io/tests/data/ordination_error6
rename to skbio/io/format/tests/data/ordination_error6
diff --git a/skbio/io/tests/data/ordination_error7 b/skbio/io/format/tests/data/ordination_error7
similarity index 100%
rename from skbio/io/tests/data/ordination_error7
rename to skbio/io/format/tests/data/ordination_error7
diff --git a/skbio/io/tests/data/ordination_error8 b/skbio/io/format/tests/data/ordination_error8
similarity index 100%
rename from skbio/io/tests/data/ordination_error8
rename to skbio/io/format/tests/data/ordination_error8
diff --git a/skbio/io/tests/data/ordination_error9 b/skbio/io/format/tests/data/ordination_error9
similarity index 100%
rename from skbio/io/tests/data/ordination_error9
rename to skbio/io/format/tests/data/ordination_error9
diff --git a/skbio/io/tests/data/ordination_example2_scores b/skbio/io/format/tests/data/ordination_example2_scores
similarity index 100%
rename from skbio/io/tests/data/ordination_example2_scores
rename to skbio/io/format/tests/data/ordination_example2_scores
diff --git a/skbio/io/tests/data/ordination_example3_scores b/skbio/io/format/tests/data/ordination_example3_scores
similarity index 100%
rename from skbio/io/tests/data/ordination_example3_scores
rename to skbio/io/format/tests/data/ordination_example3_scores
diff --git a/skbio/io/tests/data/ordination_exp_Ordination_CCA_site b/skbio/io/format/tests/data/ordination_exp_Ordination_CCA_site
similarity index 100%
rename from skbio/io/tests/data/ordination_exp_Ordination_CCA_site
rename to skbio/io/format/tests/data/ordination_exp_Ordination_CCA_site
diff --git a/skbio/io/tests/data/ordination_exp_Ordination_CCA_site_constraints b/skbio/io/format/tests/data/ordination_exp_Ordination_CCA_site_constraints
similarity index 100%
rename from skbio/io/tests/data/ordination_exp_Ordination_CCA_site_constraints
rename to skbio/io/format/tests/data/ordination_exp_Ordination_CCA_site_constraints
diff --git a/skbio/io/tests/data/ordination_exp_Ordination_CCA_species b/skbio/io/format/tests/data/ordination_exp_Ordination_CCA_species
similarity index 100%
rename from skbio/io/tests/data/ordination_exp_Ordination_CCA_species
rename to skbio/io/format/tests/data/ordination_exp_Ordination_CCA_species
diff --git a/skbio/io/tests/data/ordination_exp_Ordination_PCoA_site b/skbio/io/format/tests/data/ordination_exp_Ordination_PCoA_site
similarity index 100%
rename from skbio/io/tests/data/ordination_exp_Ordination_PCoA_site
rename to skbio/io/format/tests/data/ordination_exp_Ordination_PCoA_site
diff --git a/skbio/io/tests/data/ordination_exp_Ordination_RDA_site b/skbio/io/format/tests/data/ordination_exp_Ordination_RDA_site
similarity index 100%
rename from skbio/io/tests/data/ordination_exp_Ordination_RDA_site
rename to skbio/io/format/tests/data/ordination_exp_Ordination_RDA_site
diff --git a/skbio/io/tests/data/ordination_exp_Ordination_RDA_site_constraints b/skbio/io/format/tests/data/ordination_exp_Ordination_RDA_site_constraints
similarity index 100%
rename from skbio/io/tests/data/ordination_exp_Ordination_RDA_site_constraints
rename to skbio/io/format/tests/data/ordination_exp_Ordination_RDA_site_constraints
diff --git a/skbio/io/tests/data/ordination_exp_Ordination_RDA_species b/skbio/io/format/tests/data/ordination_exp_Ordination_RDA_species
similarity index 100%
rename from skbio/io/tests/data/ordination_exp_Ordination_RDA_species
rename to skbio/io/format/tests/data/ordination_exp_Ordination_RDA_species
diff --git a/skbio/io/tests/data/phylip_dna_3_seqs b/skbio/io/format/tests/data/phylip_dna_3_seqs
similarity index 100%
rename from skbio/io/tests/data/phylip_dna_3_seqs
rename to skbio/io/format/tests/data/phylip_dna_3_seqs
diff --git a/skbio/io/tests/data/phylip_single_seq_long b/skbio/io/format/tests/data/phylip_single_seq_long
similarity index 100%
rename from skbio/io/tests/data/phylip_single_seq_long
rename to skbio/io/format/tests/data/phylip_single_seq_long
diff --git a/skbio/io/tests/data/phylip_single_seq_short b/skbio/io/format/tests/data/phylip_single_seq_short
similarity index 100%
rename from skbio/io/tests/data/phylip_single_seq_short
rename to skbio/io/format/tests/data/phylip_single_seq_short
diff --git a/skbio/io/tests/data/phylip_two_chunks b/skbio/io/format/tests/data/phylip_two_chunks
similarity index 100%
rename from skbio/io/tests/data/phylip_two_chunks
rename to skbio/io/format/tests/data/phylip_two_chunks
diff --git a/skbio/io/tests/data/phylip_variable_length_ids b/skbio/io/format/tests/data/phylip_variable_length_ids
similarity index 100%
rename from skbio/io/tests/data/phylip_variable_length_ids
rename to skbio/io/format/tests/data/phylip_variable_length_ids
diff --git a/skbio/io/tests/data/qseq_invalid_filter b/skbio/io/format/tests/data/qseq_invalid_filter
similarity index 100%
rename from skbio/io/tests/data/qseq_invalid_filter
rename to skbio/io/format/tests/data/qseq_invalid_filter
diff --git a/skbio/io/tests/data/qseq_invalid_lane b/skbio/io/format/tests/data/qseq_invalid_lane
similarity index 100%
rename from skbio/io/tests/data/qseq_invalid_lane
rename to skbio/io/format/tests/data/qseq_invalid_lane
diff --git a/skbio/io/tests/data/qseq_invalid_read b/skbio/io/format/tests/data/qseq_invalid_read
similarity index 100%
rename from skbio/io/tests/data/qseq_invalid_read
rename to skbio/io/format/tests/data/qseq_invalid_read
diff --git a/skbio/io/tests/data/qseq_invalid_tile b/skbio/io/format/tests/data/qseq_invalid_tile
similarity index 100%
rename from skbio/io/tests/data/qseq_invalid_tile
rename to skbio/io/format/tests/data/qseq_invalid_tile
diff --git a/skbio/io/tests/data/qseq_invalid_x b/skbio/io/format/tests/data/qseq_invalid_x
similarity index 100%
rename from skbio/io/tests/data/qseq_invalid_x
rename to skbio/io/format/tests/data/qseq_invalid_x
diff --git a/skbio/io/tests/data/qseq_invalid_y b/skbio/io/format/tests/data/qseq_invalid_y
similarity index 100%
rename from skbio/io/tests/data/qseq_invalid_y
rename to skbio/io/format/tests/data/qseq_invalid_y
diff --git a/skbio/io/tests/data/qseq_multi_seq_illumina1.3 b/skbio/io/format/tests/data/qseq_multi_seq_illumina1.3
similarity index 100%
rename from skbio/io/tests/data/qseq_multi_seq_illumina1.3
rename to skbio/io/format/tests/data/qseq_multi_seq_illumina1.3
diff --git a/skbio/io/tests/data/qseq_single_seq_sanger b/skbio/io/format/tests/data/qseq_single_seq_sanger
similarity index 100%
rename from skbio/io/tests/data/qseq_single_seq_sanger
rename to skbio/io/format/tests/data/qseq_single_seq_sanger
diff --git a/skbio/io/tests/data/qual_2_seqs_defaults b/skbio/io/format/tests/data/qual_2_seqs_defaults
similarity index 100%
rename from skbio/io/tests/data/qual_2_seqs_defaults
rename to skbio/io/format/tests/data/qual_2_seqs_defaults
diff --git a/skbio/io/tests/data/qual_3_seqs_defaults b/skbio/io/format/tests/data/qual_3_seqs_defaults
similarity index 100%
copy from skbio/io/tests/data/qual_3_seqs_defaults
copy to skbio/io/format/tests/data/qual_3_seqs_defaults
diff --git a/skbio/io/tests/data/qual_3_seqs_defaults_desc_mismatch b/skbio/io/format/tests/data/qual_3_seqs_defaults_desc_mismatch
similarity index 100%
rename from skbio/io/tests/data/qual_3_seqs_defaults_desc_mismatch
rename to skbio/io/format/tests/data/qual_3_seqs_defaults_desc_mismatch
diff --git a/skbio/io/tests/data/qual_3_seqs_defaults_extra b/skbio/io/format/tests/data/qual_3_seqs_defaults_extra
similarity index 100%
rename from skbio/io/tests/data/qual_3_seqs_defaults_extra
rename to skbio/io/format/tests/data/qual_3_seqs_defaults_extra
diff --git a/skbio/io/tests/data/qual_3_seqs_defaults_id_mismatch b/skbio/io/format/tests/data/qual_3_seqs_defaults_id_mismatch
similarity index 100%
rename from skbio/io/tests/data/qual_3_seqs_defaults_id_mismatch
rename to skbio/io/format/tests/data/qual_3_seqs_defaults_id_mismatch
diff --git a/skbio/io/tests/data/qual_3_seqs_defaults_length_mismatch b/skbio/io/format/tests/data/qual_3_seqs_defaults_length_mismatch
similarity index 100%
rename from skbio/io/tests/data/qual_3_seqs_defaults_length_mismatch
rename to skbio/io/format/tests/data/qual_3_seqs_defaults_length_mismatch
diff --git a/skbio/io/tests/data/qual_3_seqs_non_defaults b/skbio/io/format/tests/data/qual_3_seqs_non_defaults
similarity index 100%
rename from skbio/io/tests/data/qual_3_seqs_non_defaults
rename to skbio/io/format/tests/data/qual_3_seqs_non_defaults
diff --git a/skbio/io/tests/data/qual_multi_seq b/skbio/io/format/tests/data/qual_5_blanks_start_of_file
similarity index 51%
copy from skbio/io/tests/data/qual_multi_seq
copy to skbio/io/format/tests/data/qual_5_blanks_start_of_file
index 392dfe7..7966431 100644
--- a/skbio/io/tests/data/qual_multi_seq
+++ b/skbio/io/format/tests/data/qual_5_blanks_start_of_file
@@ -1,14 +1,17 @@
+
+
+
+
+
 >seq1 desc1
-10 20 30 10 0 0 0 88888 1 3456
+10 20 30 10 0 0 0 255 1 255
 >_____seq__2_
 42
 > desc3
 0 0 0 0 0 0 0
 >
-1 2 3 4 5 6 777
->
-55 10 0 999 1 1 8 775 40 10 10 0
+55 10 0 99 1 1 8 77 40 10 10 0
 >
 10 9 8 7 6
 >proteinseq  detailed description 		with  new  lines   
-42 42 442 442 42 42 42 42 42 43
+42 42 255 255 42 42 42 42 42 43
diff --git a/skbio/io/format/tests/data/qual_5_ws_lines_start_of_file b/skbio/io/format/tests/data/qual_5_ws_lines_start_of_file
new file mode 100644
index 0000000..c971fca
--- /dev/null
+++ b/skbio/io/format/tests/data/qual_5_ws_lines_start_of_file
@@ -0,0 +1,17 @@
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
+>seq1 desc1
+10 20 30 10 0 0 0 255 1 255
+>_____seq__2_
+42
+> desc3
+0 0 0 0 0 0 0
+>
+55 10 0 99 1 1 8 77 40 10 10 0
+>
+10 9 8 7 6
+>proteinseq  detailed description 		with  new  lines   
+42 42 255 255 42 42 42 42 42 43
diff --git a/skbio/io/tests/data/qual_multi_seq b/skbio/io/format/tests/data/qual_6_blanks_start_of_file
similarity index 51%
copy from skbio/io/tests/data/qual_multi_seq
copy to skbio/io/format/tests/data/qual_6_blanks_start_of_file
index 392dfe7..475099b 100644
--- a/skbio/io/tests/data/qual_multi_seq
+++ b/skbio/io/format/tests/data/qual_6_blanks_start_of_file
@@ -1,14 +1,18 @@
+
+
+
+
+
+
 >seq1 desc1
-10 20 30 10 0 0 0 88888 1 3456
+10 20 30 10 0 0 0 255 1 255
 >_____seq__2_
 42
 > desc3
 0 0 0 0 0 0 0
 >
-1 2 3 4 5 6 777
->
-55 10 0 999 1 1 8 775 40 10 10 0
+55 10 0 99 1 1 8 77 40 10 10 0
 >
 10 9 8 7 6
 >proteinseq  detailed description 		with  new  lines   
-42 42 442 442 42 42 42 42 42 43
+42 42 255 255 42 42 42 42 42 43
diff --git a/skbio/io/format/tests/data/qual_6_ws_lines_start_of_file b/skbio/io/format/tests/data/qual_6_ws_lines_start_of_file
new file mode 100644
index 0000000..a8a367c
--- /dev/null
+++ b/skbio/io/format/tests/data/qual_6_ws_lines_start_of_file
@@ -0,0 +1,18 @@
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
+>seq1 desc1
+10 20 30 10 0 0 0 255 1 255
+>_____seq__2_
+42
+> desc3
+0 0 0 0 0 0 0
+>
+55 10 0 99 1 1 8 77 40 10 10 0
+>
+10 9 8 7 6
+>proteinseq  detailed description 		with  new  lines   
+42 42 255 255 42 42 42 42 42 43
diff --git a/skbio/io/tests/data/qual_multi_seq b/skbio/io/format/tests/data/qual_blank_lines_between_records
similarity index 51%
copy from skbio/io/tests/data/qual_multi_seq
copy to skbio/io/format/tests/data/qual_blank_lines_between_records
index 392dfe7..e4e3992 100644
--- a/skbio/io/tests/data/qual_multi_seq
+++ b/skbio/io/format/tests/data/qual_blank_lines_between_records
@@ -1,14 +1,22 @@
 >seq1 desc1
-10 20 30 10 0 0 0 88888 1 3456
+10 20 30 10 0 0 0 255 1 255
 >_____seq__2_
 42
 > desc3
 0 0 0 0 0 0 0
+
+
+
+
+
 >
-1 2 3 4 5 6 777
->
-55 10 0 999 1 1 8 775 40 10 10 0
+55 10 0 99 1 1 8 77 40 10 10 0
 >
 10 9 8 7 6
+
+
+
+
+
 >proteinseq  detailed description 		with  new  lines   
-42 42 442 442 42 42 42 42 42 43
+42 42 255 255 42 42 42 42 42 43
diff --git a/skbio/io/tests/data/qual_multi_seq b/skbio/io/format/tests/data/qual_blanks_end_of_file
similarity index 51%
copy from skbio/io/tests/data/qual_multi_seq
copy to skbio/io/format/tests/data/qual_blanks_end_of_file
index 392dfe7..aea7a0c 100644
--- a/skbio/io/tests/data/qual_multi_seq
+++ b/skbio/io/format/tests/data/qual_blanks_end_of_file
@@ -1,14 +1,19 @@
 >seq1 desc1
-10 20 30 10 0 0 0 88888 1 3456
+10 20 30 10 0 0 0 255 1 255
 >_____seq__2_
 42
 > desc3
 0 0 0 0 0 0 0
 >
-1 2 3 4 5 6 777
->
-55 10 0 999 1 1 8 775 40 10 10 0
+55 10 0 99 1 1 8 77 40 10 10 0
 >
 10 9 8 7 6
 >proteinseq  detailed description 		with  new  lines   
-42 42 442 442 42 42 42 42 42 43
+42 42 255 255 42 42 42 42 42 43
+
+
+
+
+
+
+
diff --git a/skbio/io/tests/data/qual_description_newline_replacement_empty_str b/skbio/io/format/tests/data/qual_description_newline_replacement_empty_str
similarity index 69%
rename from skbio/io/tests/data/qual_description_newline_replacement_empty_str
rename to skbio/io/format/tests/data/qual_description_newline_replacement_empty_str
index d1ec06d..a37e029 100644
--- a/skbio/io/tests/data/qual_description_newline_replacement_empty_str
+++ b/skbio/io/format/tests/data/qual_description_newline_replacement_empty_str
@@ -1,4 +1,4 @@
 >proteinseq detaileddescription 		with  newlines
-42 42 442 442 42 42 42 42 42 43
+42 42 255 255 42 42 42 42 42 43
 >foo
 0 1 2 3 4 5 6 7 8
diff --git a/skbio/io/tests/data/qual_description_newline_replacement_multi_char b/skbio/io/format/tests/data/qual_description_newline_replacement_multi_char
similarity index 76%
rename from skbio/io/tests/data/qual_description_newline_replacement_multi_char
rename to skbio/io/format/tests/data/qual_description_newline_replacement_multi_char
index 567f300..a3c00ce 100644
--- a/skbio/io/tests/data/qual_description_newline_replacement_multi_char
+++ b/skbio/io/format/tests/data/qual_description_newline_replacement_multi_char
@@ -1,4 +1,4 @@
 >proteinseq :-)detailed:-)description 		with  new:-):-)lines:-):-):-)
-42 42 442 442 42 42 42 42 42 43
+42 42 255 255 42 42 42 42 42 43
 >foo :-):-):-):-)
 0 1 2 3 4 5 6 7 8
diff --git a/skbio/io/tests/data/qual_description_newline_replacement_none b/skbio/io/format/tests/data/qual_description_newline_replacement_none
similarity index 72%
rename from skbio/io/tests/data/qual_description_newline_replacement_none
rename to skbio/io/format/tests/data/qual_description_newline_replacement_none
index 861548f..68b3f4c 100644
--- a/skbio/io/tests/data/qual_description_newline_replacement_none
+++ b/skbio/io/format/tests/data/qual_description_newline_replacement_none
@@ -6,7 +6,7 @@ lines
 
 
 
-42 42 442 442 42 42 42 42 42 43
+42 42 255 255 42 42 42 42 42 43
 >foo 
 
 
diff --git a/skbio/io/tests/data/qual_id_whitespace_replacement_empty_str b/skbio/io/format/tests/data/qual_id_whitespace_replacement_empty_str
similarity index 100%
rename from skbio/io/tests/data/qual_id_whitespace_replacement_empty_str
rename to skbio/io/format/tests/data/qual_id_whitespace_replacement_empty_str
diff --git a/skbio/io/tests/data/qual_id_whitespace_replacement_multi_char b/skbio/io/format/tests/data/qual_id_whitespace_replacement_multi_char
similarity index 100%
rename from skbio/io/tests/data/qual_id_whitespace_replacement_multi_char
rename to skbio/io/format/tests/data/qual_id_whitespace_replacement_multi_char
diff --git a/skbio/io/tests/data/qual_id_whitespace_replacement_none b/skbio/io/format/tests/data/qual_id_whitespace_replacement_none
similarity index 100%
rename from skbio/io/tests/data/qual_id_whitespace_replacement_none
rename to skbio/io/format/tests/data/qual_id_whitespace_replacement_none
diff --git a/skbio/io/tests/data/qual_invalid_blank_line b/skbio/io/format/tests/data/qual_invalid_blank_line_after_header
similarity index 100%
copy from skbio/io/tests/data/qual_invalid_blank_line
copy to skbio/io/format/tests/data/qual_invalid_blank_line_after_header
index 2201875..56be7b8 100644
--- a/skbio/io/tests/data/qual_invalid_blank_line
+++ b/skbio/io/format/tests/data/qual_invalid_blank_line_after_header
@@ -1,8 +1,8 @@
 >s_e_q_1 desc 1
 1 2 3 4
 >s_e_q_2          desc 2   
- 42    41 39 40 
 
+ 42    41 39 40 
 >s_e_q_3 desc 3
 100 0
  1
diff --git a/skbio/io/tests/data/qual_invalid_blank_line b/skbio/io/format/tests/data/qual_invalid_blank_line_within_seq
similarity index 82%
rename from skbio/io/tests/data/qual_invalid_blank_line
rename to skbio/io/format/tests/data/qual_invalid_blank_line_within_seq
index 2201875..3fe1c23 100644
--- a/skbio/io/tests/data/qual_invalid_blank_line
+++ b/skbio/io/format/tests/data/qual_invalid_blank_line_within_seq
@@ -1,8 +1,9 @@
 >s_e_q_1 desc 1
 1 2 3 4
 >s_e_q_2          desc 2   
- 42    41 39 40 
+ 42    41 
 
+39 40 
 >s_e_q_3 desc 3
 100 0
  1
diff --git a/skbio/io/tests/data/qual_invalid_missing_qual_scores_middle b/skbio/io/format/tests/data/qual_invalid_blank_sequence
similarity index 98%
copy from skbio/io/tests/data/qual_invalid_missing_qual_scores_middle
copy to skbio/io/format/tests/data/qual_invalid_blank_sequence
index ef86125..4d27a38 100644
--- a/skbio/io/tests/data/qual_invalid_missing_qual_scores_middle
+++ b/skbio/io/format/tests/data/qual_invalid_blank_sequence
@@ -1,6 +1,7 @@
 >s_e_q_1 desc 1
 1 2 3 4
 >s_e_q_2          desc 2   
+
 >s_e_q_3 desc 3
 100 0
  1
diff --git a/skbio/io/tests/data/qual_invalid_legacy_format b/skbio/io/format/tests/data/qual_invalid_legacy_format
similarity index 100%
rename from skbio/io/tests/data/qual_invalid_legacy_format
rename to skbio/io/format/tests/data/qual_invalid_legacy_format
diff --git a/skbio/io/tests/data/qual_invalid_missing_header b/skbio/io/format/tests/data/qual_invalid_missing_header
similarity index 100%
rename from skbio/io/tests/data/qual_invalid_missing_header
rename to skbio/io/format/tests/data/qual_invalid_missing_header
diff --git a/skbio/io/tests/data/qual_invalid_missing_qual_scores_first b/skbio/io/format/tests/data/qual_invalid_missing_qual_scores_first
similarity index 100%
rename from skbio/io/tests/data/qual_invalid_missing_qual_scores_first
rename to skbio/io/format/tests/data/qual_invalid_missing_qual_scores_first
diff --git a/skbio/io/tests/data/qual_invalid_missing_qual_scores_last b/skbio/io/format/tests/data/qual_invalid_missing_qual_scores_last
similarity index 100%
rename from skbio/io/tests/data/qual_invalid_missing_qual_scores_last
rename to skbio/io/format/tests/data/qual_invalid_missing_qual_scores_last
diff --git a/skbio/io/tests/data/qual_invalid_missing_qual_scores_middle b/skbio/io/format/tests/data/qual_invalid_missing_qual_scores_middle
similarity index 100%
copy from skbio/io/tests/data/qual_invalid_missing_qual_scores_middle
copy to skbio/io/format/tests/data/qual_invalid_missing_qual_scores_middle
diff --git a/skbio/io/tests/data/qual_invalid_qual_scores_float b/skbio/io/format/tests/data/qual_invalid_qual_scores_float
similarity index 100%
rename from skbio/io/tests/data/qual_invalid_qual_scores_float
rename to skbio/io/format/tests/data/qual_invalid_qual_scores_float
diff --git a/skbio/io/tests/data/qual_invalid_qual_scores_negative b/skbio/io/format/tests/data/qual_invalid_qual_scores_negative
similarity index 100%
rename from skbio/io/tests/data/qual_invalid_qual_scores_negative
rename to skbio/io/format/tests/data/qual_invalid_qual_scores_negative
diff --git a/skbio/io/tests/data/qual_3_seqs_defaults b/skbio/io/format/tests/data/qual_invalid_qual_scores_over_255
similarity index 94%
rename from skbio/io/tests/data/qual_3_seqs_defaults
rename to skbio/io/format/tests/data/qual_invalid_qual_scores_over_255
index 7aa4abc..8fbd48d 100644
--- a/skbio/io/tests/data/qual_3_seqs_defaults
+++ b/skbio/io/format/tests/data/qual_invalid_qual_scores_over_255
@@ -5,4 +5,4 @@
 >s_e_q_3 desc 3
 100 0
  1
--42  
+256  
diff --git a/skbio/io/tests/data/qual_invalid_qual_scores_string b/skbio/io/format/tests/data/qual_invalid_qual_scores_string
similarity index 100%
rename from skbio/io/tests/data/qual_invalid_qual_scores_string
rename to skbio/io/format/tests/data/qual_invalid_qual_scores_string
diff --git a/skbio/io/tests/data/qual_invalid_whitespace_only_line b/skbio/io/format/tests/data/qual_invalid_whitespace_line_in_seq
similarity index 84%
copy from skbio/io/tests/data/qual_invalid_whitespace_only_line
copy to skbio/io/format/tests/data/qual_invalid_whitespace_line_in_seq
index cd51bf4..68a9f44 100644
--- a/skbio/io/tests/data/qual_invalid_whitespace_only_line
+++ b/skbio/io/format/tests/data/qual_invalid_whitespace_line_in_seq
@@ -1,8 +1,9 @@
 >s_e_q_1 desc 1
 1 2 3 4
 >s_e_q_2          desc 2   
- 42    41 39 40 
+ 42    41 
 		     	   
+39 40 
 >s_e_q_3 desc 3
 100 0
  1
diff --git a/skbio/io/tests/data/qual_invalid_missing_qual_scores_middle b/skbio/io/format/tests/data/qual_invalid_whitespace_only_sequence
similarity index 87%
rename from skbio/io/tests/data/qual_invalid_missing_qual_scores_middle
rename to skbio/io/format/tests/data/qual_invalid_whitespace_only_sequence
index ef86125..3a63a0f 100644
--- a/skbio/io/tests/data/qual_invalid_missing_qual_scores_middle
+++ b/skbio/io/format/tests/data/qual_invalid_whitespace_only_sequence
@@ -1,6 +1,7 @@
 >s_e_q_1 desc 1
 1 2 3 4
 >s_e_q_2          desc 2   
+		     	   
 >s_e_q_3 desc 3
 100 0
  1
diff --git a/skbio/io/tests/data/qual_invalid_whitespace_only_line b/skbio/io/format/tests/data/qual_invalid_ws_line_after_header
similarity index 100%
rename from skbio/io/tests/data/qual_invalid_whitespace_only_line
rename to skbio/io/format/tests/data/qual_invalid_ws_line_after_header
index cd51bf4..f077d48 100644
--- a/skbio/io/tests/data/qual_invalid_whitespace_only_line
+++ b/skbio/io/format/tests/data/qual_invalid_ws_line_after_header
@@ -1,8 +1,8 @@
 >s_e_q_1 desc 1
 1 2 3 4
 >s_e_q_2          desc 2   
- 42    41 39 40 
 		     	   
+ 42    41 39 40 
 >s_e_q_3 desc 3
 100 0
  1
diff --git a/skbio/io/tests/data/qual_max_width_1 b/skbio/io/format/tests/data/qual_max_width_1
similarity index 74%
rename from skbio/io/tests/data/qual_max_width_1
rename to skbio/io/format/tests/data/qual_max_width_1
index 719dd85..fa164a9 100644
--- a/skbio/io/tests/data/qual_max_width_1
+++ b/skbio/io/format/tests/data/qual_max_width_1
@@ -6,6 +6,6 @@
 0
 0
 0
-88888
+255
 1
-3456
+255
diff --git a/skbio/io/tests/data/qual_max_width_5 b/skbio/io/format/tests/data/qual_max_width_5
similarity index 74%
rename from skbio/io/tests/data/qual_max_width_5
rename to skbio/io/format/tests/data/qual_max_width_5
index 708f97b..a524924 100644
--- a/skbio/io/tests/data/qual_max_width_5
+++ b/skbio/io/format/tests/data/qual_max_width_5
@@ -2,9 +2,8 @@
 10 20
 30 10
 0 0 0
-88888
-1
-3456
+255 1
+255
 >_____seq__2_
 42
 > desc3
@@ -12,23 +11,19 @@
 0 0 0
 0
 >
-1 2 3
-4 5 6
-777
->
 55 10
-0 999
+0 99
 1 1 8
-775
-40 10
-10 0
+77 40
+10 10
+0
 >
 10 9
 8 7 6
 >proteinseq  detailed description 		with  new  lines   
 42 42
-442
-442
+255
+255
 42 42
 42 42
 42 43
diff --git a/skbio/io/tests/data/qual_multi_seq b/skbio/io/format/tests/data/qual_multi_seq
similarity index 51%
rename from skbio/io/tests/data/qual_multi_seq
rename to skbio/io/format/tests/data/qual_multi_seq
index 392dfe7..9293747 100644
--- a/skbio/io/tests/data/qual_multi_seq
+++ b/skbio/io/format/tests/data/qual_multi_seq
@@ -1,14 +1,12 @@
 >seq1 desc1
-10 20 30 10 0 0 0 88888 1 3456
+10 20 30 10 0 0 0 255 1 255
 >_____seq__2_
 42
 > desc3
 0 0 0 0 0 0 0
 >
-1 2 3 4 5 6 777
->
-55 10 0 999 1 1 8 775 40 10 10 0
+55 10 0 99 1 1 8 77 40 10 10 0
 >
 10 9 8 7 6
 >proteinseq  detailed description 		with  new  lines   
-42 42 442 442 42 42 42 42 42 43
+42 42 255 255 42 42 42 42 42 43
diff --git a/skbio/io/tests/data/qual_multi_seq_roundtrip b/skbio/io/format/tests/data/qual_multi_seq_roundtrip
similarity index 100%
rename from skbio/io/tests/data/qual_multi_seq_roundtrip
rename to skbio/io/format/tests/data/qual_multi_seq_roundtrip
diff --git a/skbio/io/tests/data/qual_prot_seqs_odd_labels b/skbio/io/format/tests/data/qual_prot_seqs_odd_labels
similarity index 90%
rename from skbio/io/tests/data/qual_prot_seqs_odd_labels
rename to skbio/io/format/tests/data/qual_prot_seqs_odd_labels
index 0a2443d..609cc60 100644
--- a/skbio/io/tests/data/qual_prot_seqs_odd_labels
+++ b/skbio/io/format/tests/data/qual_prot_seqs_odd_labels
@@ -5,4 +5,4 @@
 1
  2
   33  
-   123456789
+   123
diff --git a/skbio/io/tests/data/qual_sequence_collection_different_type b/skbio/io/format/tests/data/qual_sequence_collection_different_type
similarity index 89%
rename from skbio/io/tests/data/qual_sequence_collection_different_type
rename to skbio/io/format/tests/data/qual_sequence_collection_different_type
index b61c4fb..936141f 100644
--- a/skbio/io/tests/data/qual_sequence_collection_different_type
+++ b/skbio/io/format/tests/data/qual_sequence_collection_different_type
@@ -3,4 +3,4 @@
 >rnaseq-1 rnaseq desc 1  
 10 9 10
 >rnaseq-2        rnaseq desc 2
-9 99 999
+9 99 99
diff --git a/skbio/io/tests/data/qual_single_bio_seq_non_defaults b/skbio/io/format/tests/data/qual_single_bio_seq_non_defaults
similarity index 100%
rename from skbio/io/tests/data/qual_single_bio_seq_non_defaults
rename to skbio/io/format/tests/data/qual_single_bio_seq_non_defaults
diff --git a/skbio/io/tests/data/qual_single_dna_seq_non_defaults b/skbio/io/format/tests/data/qual_single_dna_seq_non_defaults
similarity index 100%
rename from skbio/io/tests/data/qual_single_dna_seq_non_defaults
rename to skbio/io/format/tests/data/qual_single_dna_seq_non_defaults
diff --git a/skbio/io/tests/data/qual_single_nuc_seq_non_defaults b/skbio/io/format/tests/data/qual_single_nuc_seq_non_defaults
similarity index 100%
rename from skbio/io/tests/data/qual_single_nuc_seq_non_defaults
rename to skbio/io/format/tests/data/qual_single_nuc_seq_non_defaults
diff --git a/skbio/io/tests/data/qual_single_prot_seq_non_defaults b/skbio/io/format/tests/data/qual_single_prot_seq_non_defaults
similarity index 100%
rename from skbio/io/tests/data/qual_single_prot_seq_non_defaults
rename to skbio/io/format/tests/data/qual_single_prot_seq_non_defaults
diff --git a/skbio/io/tests/data/qual_single_rna_seq_non_defaults b/skbio/io/format/tests/data/qual_single_rna_seq_non_defaults
similarity index 100%
rename from skbio/io/tests/data/qual_single_rna_seq_non_defaults
rename to skbio/io/format/tests/data/qual_single_rna_seq_non_defaults
diff --git a/skbio/io/format/tests/data/qual_single_seq b/skbio/io/format/tests/data/qual_single_seq
new file mode 100644
index 0000000..44bc8cf
--- /dev/null
+++ b/skbio/io/format/tests/data/qual_single_seq
@@ -0,0 +1,2 @@
+>seq1 desc1
+10 20 30 10 0 0 0 255 1 255
diff --git a/skbio/io/format/tests/data/qual_ws_lines_between_records b/skbio/io/format/tests/data/qual_ws_lines_between_records
new file mode 100644
index 0000000..996127b
--- /dev/null
+++ b/skbio/io/format/tests/data/qual_ws_lines_between_records
@@ -0,0 +1,22 @@
+>seq1 desc1
+10 20 30 10 0 0 0 255 1 255
+>_____seq__2_
+42
+> desc3
+0 0 0 0 0 0 0
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
+>
+55 10 0 99 1 1 8 77 40 10 10 0
+>
+10 9 8 7 6
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
+>proteinseq  detailed description 		with  new  lines   
+42 42 255 255 42 42 42 42 42 43
diff --git a/skbio/io/format/tests/data/qual_ws_lines_end_of_file b/skbio/io/format/tests/data/qual_ws_lines_end_of_file
new file mode 100644
index 0000000..e2af590
--- /dev/null
+++ b/skbio/io/format/tests/data/qual_ws_lines_end_of_file
@@ -0,0 +1,20 @@
+>seq1 desc1
+10 20 30 10 0 0 0 255 1 255
+>_____seq__2_
+42
+> desc3
+0 0 0 0 0 0 0
+>
+55 10 0 99 1 1 8 77 40 10 10 0
+>
+10 9 8 7 6
+>proteinseq  detailed description 		with  new  lines   
+42 42 255 255 42 42 42 42 42 43
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
+		     	   
diff --git a/skbio/io/tests/data/sanger_full_range_as_illumina.fastq b/skbio/io/format/tests/data/sanger_full_range_as_illumina.fastq
similarity index 100%
rename from skbio/io/tests/data/sanger_full_range_as_illumina.fastq
rename to skbio/io/format/tests/data/sanger_full_range_as_illumina.fastq
diff --git a/skbio/io/tests/data/sanger_full_range_as_sanger.fastq b/skbio/io/format/tests/data/sanger_full_range_as_sanger.fastq
similarity index 100%
rename from skbio/io/tests/data/sanger_full_range_as_sanger.fastq
rename to skbio/io/format/tests/data/sanger_full_range_as_sanger.fastq
diff --git a/skbio/io/tests/data/sanger_full_range_original_sanger.fastq b/skbio/io/format/tests/data/sanger_full_range_original_sanger.fastq
similarity index 100%
rename from skbio/io/tests/data/sanger_full_range_original_sanger.fastq
rename to skbio/io/format/tests/data/sanger_full_range_original_sanger.fastq
diff --git a/skbio/io/tests/data/solexa_full_range_original_solexa.fastq b/skbio/io/format/tests/data/solexa_full_range_original_solexa.fastq
similarity index 100%
rename from skbio/io/tests/data/solexa_full_range_original_solexa.fastq
rename to skbio/io/format/tests/data/solexa_full_range_original_solexa.fastq
diff --git a/skbio/io/tests/data/tsv_10_fields b/skbio/io/format/tests/data/tsv_10_fields
similarity index 100%
rename from skbio/io/tests/data/tsv_10_fields
rename to skbio/io/format/tests/data/tsv_10_fields
diff --git a/skbio/io/tests/data/tsv_8_fields b/skbio/io/format/tests/data/tsv_8_fields
similarity index 100%
rename from skbio/io/tests/data/tsv_8_fields
rename to skbio/io/format/tests/data/tsv_8_fields
diff --git a/skbio/io/tests/data/whitespace_only b/skbio/io/format/tests/data/whitespace_only
similarity index 100%
rename from skbio/io/tests/data/whitespace_only
rename to skbio/io/format/tests/data/whitespace_only
diff --git a/skbio/io/tests/data/wrapping_as_illumina.fastq b/skbio/io/format/tests/data/wrapping_as_illumina.fastq
similarity index 100%
rename from skbio/io/tests/data/wrapping_as_illumina.fastq
rename to skbio/io/format/tests/data/wrapping_as_illumina.fastq
diff --git a/skbio/io/tests/data/wrapping_as_sanger.fastq b/skbio/io/format/tests/data/wrapping_as_sanger.fastq
similarity index 100%
rename from skbio/io/tests/data/wrapping_as_sanger.fastq
rename to skbio/io/format/tests/data/wrapping_as_sanger.fastq
diff --git a/skbio/io/tests/data/wrapping_original_sanger.fastq b/skbio/io/format/tests/data/wrapping_original_sanger.fastq
similarity index 100%
rename from skbio/io/tests/data/wrapping_original_sanger.fastq
rename to skbio/io/format/tests/data/wrapping_original_sanger.fastq
diff --git a/skbio/io/tests/test_base.py b/skbio/io/format/tests/test_base.py
similarity index 79%
rename from skbio/io/tests/test_base.py
rename to skbio/io/format/tests/test_base.py
index ff7d429..441c039 100644
--- a/skbio/io/tests/test_base.py
+++ b/skbio/io/format/tests/test_base.py
@@ -7,44 +7,20 @@
 # ----------------------------------------------------------------------------
 
 from __future__ import absolute_import, division, print_function
+
+import six
 from future.builtins import range, zip
 
 import unittest
 
 import numpy.testing as npt
+import numpy as np
 
-from skbio import BiologicalSequence, DNASequence, RNASequence
-from skbio.io._base import (_chunk_str, _decode_qual_to_phred,
-                            _encode_phred_to_qual, _get_nth_sequence,
-                            _parse_fasta_like_header,
-                            _format_fasta_like_records)
-
-
-class ChunkStrTests(unittest.TestCase):
-    def test_even_split(self):
-        self.assertEqual(_chunk_str('abcdef', 6, ' '), 'abcdef')
-        self.assertEqual(_chunk_str('abcdef', 3, ' '), 'abc def')
-        self.assertEqual(_chunk_str('abcdef', 2, ' '), 'ab cd ef')
-        self.assertEqual(_chunk_str('abcdef', 1, ' '), 'a b c d e f')
-        self.assertEqual(_chunk_str('a', 1, ' '), 'a')
-        self.assertEqual(_chunk_str('abcdef', 2, ''), 'abcdef')
-
-    def test_no_split(self):
-        self.assertEqual(_chunk_str('', 2, '\n'), '')
-        self.assertEqual(_chunk_str('a', 100, '\n'), 'a')
-        self.assertEqual(_chunk_str('abcdef', 42, '|'), 'abcdef')
-
-    def test_uneven_split(self):
-        self.assertEqual(_chunk_str('abcdef', 5, '|'), 'abcde|f')
-        self.assertEqual(_chunk_str('abcdef', 4, '|'), 'abcd|ef')
-        self.assertEqual(_chunk_str('abcdefg', 3, ' - '), 'abc - def - g')
-
-    def test_invalid_n(self):
-        with self.assertRaisesRegexp(ValueError, 'n=0'):
-            _chunk_str('abcdef', 0, ' ')
-
-        with self.assertRaisesRegexp(ValueError, 'n=-42'):
-            _chunk_str('abcdef', -42, ' ')
+from skbio import Sequence, DNA, RNA
+from skbio.io.format._base import (_decode_qual_to_phred,
+                                   _encode_phred_to_qual, _get_nth_sequence,
+                                   _parse_fasta_like_header,
+                                   _format_fasta_like_records)
 
 
 class PhredDecoderTests(unittest.TestCase):
@@ -74,18 +50,18 @@ class PhredDecoderTests(unittest.TestCase):
         self.assertIn("'illumina'", str(cm.exception))
 
     def test_empty_qual_str(self):
-        self.assertEqual(_decode_qual_to_phred('', variant='sanger'), [])
+        npt.assert_equal(_decode_qual_to_phred('', variant='sanger'),
+                         np.array([], dtype=np.uint8))
 
     def test_sanger_variant(self):
         # test entire range of possible ascii chars for sanger
         all_sanger_ascii = ('!"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOP'
                             'QRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~')
         obs = _decode_qual_to_phred(all_sanger_ascii, variant='sanger')
-        self.assertEqual(obs, list(range(94)))
+        npt.assert_equal(obs, np.arange(94))
 
         with self.assertRaises(ValueError) as cm:
             _decode_qual_to_phred('a b', variant='sanger')
-        self.assertIn('-1', str(cm.exception))
         self.assertIn('[0, 93]', str(cm.exception))
 
     def test_illumina13_variant(self):
@@ -94,11 +70,10 @@ class PhredDecoderTests(unittest.TestCase):
                                 'lmnopqrstuvwxyz{|}~')
         obs = _decode_qual_to_phred(all_illumina13_ascii,
                                     variant='illumina1.3')
-        self.assertEqual(obs, list(range(63)))
+        npt.assert_equal(obs, np.arange(63))
 
         with self.assertRaises(ValueError) as cm:
             _decode_qual_to_phred('a!b', variant='illumina1.3')
-        self.assertIn('-31', str(cm.exception))
         self.assertIn('[0, 62]', str(cm.exception))
 
     def test_illumina18_variant(self):
@@ -107,33 +82,29 @@ class PhredDecoderTests(unittest.TestCase):
                                 'MNOPQRSTUVWXYZ[\\]^_')
         obs = _decode_qual_to_phred(all_illumina18_ascii,
                                     variant='illumina1.8')
-        self.assertEqual(obs, list(range(63)))
+        npt.assert_equal(obs, np.arange(63))
 
         with self.assertRaises(ValueError) as cm:
             _decode_qual_to_phred('AaB', variant='illumina1.8')
-        self.assertIn('64', str(cm.exception))
         self.assertIn('[0, 62]', str(cm.exception))
 
     def test_custom_phred_offset(self):
         ascii_chars = '*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\'
         obs = _decode_qual_to_phred(ascii_chars, phred_offset=42)
-        self.assertEqual(obs, list(range(51)))
+        npt.assert_equal(obs, np.arange(51))
 
         with self.assertRaises(ValueError) as cm:
             _decode_qual_to_phred(ascii_chars, phred_offset=43)
-        self.assertIn('-1', str(cm.exception))
         self.assertIn('[0, 83]', str(cm.exception))
 
         with self.assertRaises(ValueError) as cm:
             _decode_qual_to_phred(ascii_chars, phred_offset=0)
         self.assertIn('`phred_offset`', str(cm.exception))
-        self.assertIn('0', str(cm.exception))
         self.assertIn('printable', str(cm.exception))
 
         with self.assertRaises(ValueError) as cm:
             _decode_qual_to_phred(ascii_chars, phred_offset=127)
         self.assertIn('`phred_offset`', str(cm.exception))
-        self.assertIn('127', str(cm.exception))
         self.assertIn('printable', str(cm.exception))
 
 
@@ -277,12 +248,15 @@ class TestParseFASTALikeHeader(unittest.TestCase):
 class TestFormatFASTALikeRecords(unittest.TestCase):
     def setUp(self):
         def generator():
-            yield BiologicalSequence('ACGT', id='', description='',
-                                     quality=range(4))
-            yield RNASequence('GAU', id='  foo \t\t bar ', description='')
-            yield DNASequence('TAG', id='', description='foo\n\n bar\n')
-            yield BiologicalSequence('A', id='foo', description='bar baz',
-                                     quality=[42])
+            yield Sequence('ACGT', metadata={'id': '', 'description': ''},
+                           positional_metadata={'quality': range(4)})
+            yield RNA('GAU', metadata={'id': '  foo \t\t bar ',
+                                       'description': ''})
+            yield DNA('TAG',
+                      metadata={'id': '', 'description': 'foo\n\n bar\n'})
+            yield Sequence('A',
+                           metadata={'id': 'foo', 'description': 'bar baz'},
+                           positional_metadata={'quality': [42]})
         self.gen = generator()
 
     def test_no_replacement(self):
@@ -325,32 +299,32 @@ class TestFormatFASTALikeRecords(unittest.TestCase):
             npt.assert_equal(o, e)
 
     def test_newline_character_in_id_whitespace_replacement(self):
-        with self.assertRaisesRegexp(ValueError, 'Newline character'):
+        with six.assertRaisesRegex(self, ValueError, 'Newline character'):
             list(_format_fasta_like_records(self.gen, '-\n--', ' ', False))
 
     def test_newline_character_in_description_newline_replacement(self):
-        with self.assertRaisesRegexp(ValueError, 'Newline character'):
+        with six.assertRaisesRegex(self, ValueError, 'Newline character'):
             list(_format_fasta_like_records(self.gen, None, 'a\nb', False))
 
     def test_empty_sequence(self):
         def blank_seq_gen():
-            for seq in (DNASequence('A'), BiologicalSequence(''),
-                        RNASequence('GG')):
+            for seq in (DNA('A'), Sequence(''),
+                        RNA('GG')):
                 yield seq
 
-        with self.assertRaisesRegexp(ValueError, '2nd.*empty'):
+        with six.assertRaisesRegex(self, ValueError, '2nd.*empty'):
             list(_format_fasta_like_records(blank_seq_gen(), None, None,
                                             False))
 
     def test_missing_quality_scores(self):
         def missing_qual_gen():
-            for seq in (RNASequence('A', quality=[42]),
-                        BiologicalSequence('AG'),
-                        DNASequence('GG', quality=[41, 40])):
+            for seq in (RNA('A', positional_metadata={'quality': [42]}),
+                        Sequence('AG'),
+                        DNA('GG', positional_metadata={'quality': [41, 40]})):
                 yield seq
 
-        with self.assertRaisesRegexp(ValueError,
-                                     '2nd sequence.*quality scores'):
+        with six.assertRaisesRegex(self, ValueError,
+                                   '2nd sequence.*quality scores'):
             list(_format_fasta_like_records(missing_qual_gen(), '-', '-',
                                             True))
 
diff --git a/skbio/io/tests/test_clustal.py b/skbio/io/format/tests/test_clustal.py
similarity index 68%
rename from skbio/io/tests/test_clustal.py
rename to skbio/io/format/tests/test_clustal.py
index 7a022cf..32fe0a5 100644
--- a/skbio/io/tests/test_clustal.py
+++ b/skbio/io/format/tests/test_clustal.py
@@ -1,58 +1,43 @@
-#!/usr/bin/env python
-# -----------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
 # Distributed under the terms of the Modified BSD License.
 #
 # The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
+
 from __future__ import absolute_import, division, print_function
-from six import StringIO
 
+from io import StringIO
 from unittest import TestCase, main
 
-from skbio.io.clustal import (_clustal_to_alignment, _alignment_to_clustal,
-                              _clustal_sniffer)
-from skbio.io.clustal import (_is_clustal_seq_line, last_space,
-                              _delete_trailing_number, _check_length,
-                              _label_line_parser)
+from skbio.io.format.clustal import (
+    _clustal_to_alignment, _alignment_to_clustal, _clustal_sniffer,
+    _is_clustal_seq_line, _delete_trailing_number, _check_length,
+    _label_line_parser)
 
 from skbio.io import ClustalFormatError
-from skbio.parse.record import DelimitedSplitter
 
 
 class ClustalHelperTests(TestCase):
-
-    """Tests of top-level functions."""
-
     def test_label_line_parser(self):
-        last_space = DelimitedSplitter(None, -1)
-        self.assertEquals(_label_line_parser(StringIO('abc\tucag'),
-                                             last_space),
-                          ({"abc": ["ucag"]}, ['abc']))
+        self.assertEqual(_label_line_parser(StringIO(u'abc\tucag')),
+                         ({"abc": ["ucag"]}, ['abc']))
 
         with self.assertRaises(ClustalFormatError):
-            _label_line_parser(StringIO('abctucag'), last_space)
+            _label_line_parser(StringIO(u'abctucag'))
 
     def test_is_clustal_seq_line(self):
-
         ic = _is_clustal_seq_line
-        assert ic('abc')
-        assert ic('abc  def')
-        assert not ic('CLUSTAL')
-        assert not ic('CLUSTAL W fsdhicjkjsdk')
-        assert not ic('  *   *')
-        assert not ic(' abc def')
-        assert not ic('MUSCLE (3.41) multiple sequence alignment')
-
-    def test_last_space(self):
-
-        self.assertEqual(last_space('a\t\t\t  b    c'), ['a b', 'c'])
-        self.assertEqual(last_space('xyz'), ['xyz'])
-        self.assertEqual(last_space('  a b'), ['a', 'b'])
+        self.assertTrue(ic('abc'))
+        self.assertTrue(ic('abc  def'))
+        self.assertFalse(ic('CLUSTAL'))
+        self.assertFalse(ic('CLUSTAL W fsdhicjkjsdk'))
+        self.assertFalse(ic('  *   *'))
+        self.assertFalse(ic(' abc def'))
+        self.assertFalse(ic('MUSCLE (3.41) multiple sequence alignment'))
 
     def test_delete_trailing_number(self):
-
         dtn = _delete_trailing_number
         self.assertEqual(dtn('abc'), 'abc')
         self.assertEqual(dtn('a b c'), 'a b c')
@@ -93,11 +78,12 @@ class ClustalIOTests(TestCase):
 
     def setUp(self):
         self.valid_clustal_out = [
-            StringIO('abc\tucag'),
-            StringIO('abc\tuuu\ndef\tccc\n\n    ***\n\ndef ggg\nabc\taaa\n'),
-            StringIO('\n'.join(['abc uca', 'def ggg ccc'])),
-            StringIO('\n'.join(['abc uca ggg', 'def ggg ccc'])),
-            StringIO("""CLUSTAL
+            StringIO(u'CLUSTAL\n\nabc\tucag'),
+            StringIO(u'CLUSTAL\n\nabc\tuuu\ndef\tccc\n\n    ***\n\ndef ggg\nab'
+                     'c\taaa\n'),
+            StringIO(u'\n'.join(['CLUSTAL\n', 'abc uca', 'def ggg ccc'])),
+            StringIO(u'\n'.join(['CLUSTAL\n', 'abc uca ggg', 'def ggg ccc'])),
+            StringIO(u"""CLUSTAL
 
 
 abc             GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
@@ -106,7 +92,7 @@ xyz             ------------------------------------------------------------
 
 
 """),
-            StringIO("""CLUSTAL
+            StringIO(u"""CLUSTAL
 
 
 abc             GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
@@ -118,7 +104,7 @@ abc             GUCGAUACGUACGUCAGUCAGUACGUCAGCAUGCAUACGUACGUCGUACGUACGU-CGAC
 def             -----------------------------------------CGCGAUGCAUGCAU-CGAU
 xyz             -------------------------------------CAUGCAUCGUACGUACGCAUGAC
 """),
-            StringIO("""CLUSTAL W (1.82) multiple sequence alignment
+            StringIO(u"""CLUSTAL W (1.82) multiple sequence alignment
 
 
 abc             GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
@@ -134,7 +120,7 @@ xyz             -------------------------------------CAUGCAUCGUACGUACGCAUGAC
 abc             UGACUAGUCAGCUAGCAUCGAUCAGU
 def             CGAUCAGUCAGUCGAU----------
 xyz             UGCUGCAUCA----------------"""),
-            StringIO("""CLUSTAL W (1.74) multiple sequence alignment
+            StringIO(u"""CLUSTAL W (1.74) multiple sequence alignment
 
 
 abc             GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA 60
@@ -152,27 +138,27 @@ def             CGAUCAGUCAGUCGAU---------- 34
 xyz             UGCUGCAUCA---------------- 33
                 *     ***""")
             ]
-        self.invalid_clustal_out = [StringIO('\n'.join(['dshfjsdfhdfsj',
-                                                        'hfsdjksdfhjsdf'])),
-                                    StringIO('\n'.join(['hfsdjksdfhjsdf'])),
-                                    StringIO('\n'.join(['dshfjsdfhdfsj',
-                                                        'dshfjsdfhdfsj',
-                                                        'hfsdjksdfhjsdf'])),
-                                    StringIO('\n'.join(['dshfjsdfhdfsj',
-                                                        '\t',
-                                                        'hfsdjksdfhjsdf'])),
-                                    StringIO('\n'.join(['dshfj\tdfhdfsj',
-                                                        'hfsdjksdfhjsdf'])),
-                                    StringIO('\n'.join(['dshfjsdfhdfsj',
-                                                        'hfsdjk\tdfhjsdf'])),
-                                    StringIO("""CLUSTAL W (1.74) multiple sequence alignment
+        self.invalid_clustal_out = [StringIO(u'\n'.join(['dshfjsdfhdfsj',
+                                                         'hfsdjksdfhjsdf'])),
+                                    StringIO(u'\n'.join(['hfsdjksdfhjsdf'])),
+                                    StringIO(u'\n'.join(['dshfjsdfhdfsj',
+                                                         'dshfjsdfhdfsj',
+                                                         'hfsdjksdfhjsdf'])),
+                                    StringIO(u'\n'.join(['dshfjsdfhdfsj',
+                                                         '\t',
+                                                         'hfsdjksdfhjsdf'])),
+                                    StringIO(u'\n'.join(['dshfj\tdfhdfsj',
+                                                         'hfsdjksdfhjsdf'])),
+                                    StringIO(u'\n'.join(['dshfjsdfhdfsj',
+                                                         'hfsdjk\tdfhjsdf'])),
+                                    StringIO(u"""CLUSTAL W (1.74) multiple sequence alignment
 
 
 adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
 ------------------------------------------------------------
 adk -----GGGGGGG------------------------------------------------
 """),
-                                    StringIO("""CLUSTAL W (1.74) multiple sequence alignment
+                                    StringIO(u"""CLUSTAL W (1.74) multiple sequence alignment
 
 
 adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
@@ -182,7 +168,7 @@ adk -----GGGGGGG------------------------------------------------
 adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
 adk -----GGGGGGG---------------------------------------------
 """),
-                                    StringIO("""CLUSTAL W (1.74) multiple sequence alignment
+                                    StringIO(u"""CLUSTAL W (1.74) multiple sequence alignment
 
 
 adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
@@ -193,7 +179,7 @@ adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCA
 adk -----GGGGGGG---------------------------------------------
 """),
 
-                                    StringIO("""CLUSTAL W (1.74) multiple sequence alignment
+                                    StringIO(u"""CLUSTAL W (1.74) multiple sequence alignment
 
 
 adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
@@ -201,7 +187,7 @@ adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
 adk -----GGGGGGG------------------------------------------------
 """),
 
-                                    StringIO("""CLUSTAL W (1.74) multiple sequence alignment
+                                    StringIO(u"""CLUSTAL W (1.74) multiple sequence alignment
 
 
 GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
@@ -224,7 +210,7 @@ UGCUGCAUCA---------------- 33
         self.assertEqual(dict(result), {})
 
     def test_alignment_to_clustal_with_bad_input(self):
-        BAD = StringIO('\n'.join(['dshfjsdfhdfsj', 'hfsdjksdfhjsdf']))
+        BAD = StringIO(u'\n'.join(['dshfjsdfhdfsj', 'hfsdjksdfhjsdf']))
         result = _clustal_to_alignment(BAD, strict=False)
         self.assertEqual(dict(result), {})
         # should fail unless we turned strict processing off
@@ -233,17 +219,13 @@ UGCUGCAUCA---------------- 33
             dict(_clustal_to_alignment(BAD))
 
     def test_valid_alignment_to_clustal_and_clustal_to_alignment(self):
-        import os
         for valid_out in self.valid_clustal_out:
-            fname = "test.aln"
-            testfile = open(fname, 'w')
             result_before = _clustal_to_alignment(valid_out)
-            _alignment_to_clustal(result_before, testfile)
-            testfile.close()
-            testfile = open(fname, 'r')
-            result_after = _clustal_to_alignment(testfile)
-            self.assertEquals(result_before, result_after)
-        os.remove(fname)
+            with StringIO() as fh:
+                _alignment_to_clustal(result_before, fh)
+                fh.seek(0)
+                result_after = _clustal_to_alignment(fh)
+            self.assertEqual(result_before, result_after)
 
     def test_invalid_alignment_to_clustal_and_clustal_to_alignment(self):
         for invalid_out in self.invalid_clustal_out:
diff --git a/skbio/io/format/tests/test_emptyfile.py b/skbio/io/format/tests/test_emptyfile.py
new file mode 100644
index 0000000..af9309f
--- /dev/null
+++ b/skbio/io/format/tests/test_emptyfile.py
@@ -0,0 +1,38 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import unittest
+import io
+
+from skbio.io.format.emptyfile import _empty_file_sniffer
+
+
+class TestEmptyFile(unittest.TestCase):
+    def test_empty(self):
+        res, kw = _empty_file_sniffer(io.StringIO())
+        self.assertTrue(res)
+        self.assertEqual({}, kw)
+
+        res, kw = _empty_file_sniffer(io.StringIO(u"       \n   \t "))
+        self.assertTrue(res)
+        self.assertEqual({}, kw)
+
+    def test_not_empty(self):
+        res, kw = _empty_file_sniffer(io.StringIO(u"a"))
+        self.assertFalse(res)
+        self.assertEqual({}, kw)
+
+        res, kw = _empty_file_sniffer(io.StringIO(u"                  \n \ta"))
+        self.assertFalse(res)
+        self.assertEqual({}, kw)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/skbio/io/tests/test_fasta.py b/skbio/io/format/tests/test_fasta.py
similarity index 55%
rename from skbio/io/tests/test_fasta.py
rename to skbio/io/format/tests/test_fasta.py
index e8e6647..285cc51 100644
--- a/skbio/io/tests/test_fasta.py
+++ b/skbio/io/format/tests/test_fasta.py
@@ -8,21 +8,22 @@
 
 from __future__ import absolute_import, division, print_function
 from future.builtins import map, range, zip
-from six import StringIO
+import six
 
+import io
 from unittest import TestCase, main
+from functools import partial
 
-from skbio import (BiologicalSequence, NucleotideSequence, DNA, RNA, Protein,
-                   ProteinSequence, SequenceCollection, Alignment)
-from skbio.sequence import BiologicalSequenceError
-from skbio.io import FASTAFormatError
-from skbio.io.fasta import (
+import numpy as np
+
+from skbio import (Sequence, DNA, RNA, Protein, SequenceCollection, Alignment)
+from skbio.io import FASTAFormatError, QUALFormatError
+from skbio.io.format.fasta import (
     _fasta_sniffer, _fasta_to_generator, _fasta_to_biological_sequence,
-    _fasta_to_nucleotide_sequence, _fasta_to_dna_sequence,
-    _fasta_to_rna_sequence, _fasta_to_protein_sequence,
+    _fasta_to_dna_sequence, _fasta_to_rna_sequence, _fasta_to_protein_sequence,
     _fasta_to_sequence_collection, _fasta_to_alignment, _generator_to_fasta,
-    _biological_sequence_to_fasta, _nucleotide_sequence_to_fasta,
-    _dna_sequence_to_fasta, _rna_sequence_to_fasta, _protein_sequence_to_fasta,
+    _biological_sequence_to_fasta, _dna_sequence_to_fasta,
+    _rna_sequence_to_fasta, _protein_sequence_to_fasta,
     _sequence_collection_to_fasta, _alignment_to_fasta)
 from skbio.util import get_data_path
 
@@ -30,6 +31,11 @@ from skbio.util import get_data_path
 class SnifferTests(TestCase):
     def setUp(self):
         self.positive_fps = list(map(get_data_path, [
+            'fasta_5_blanks_start_of_file',
+            'fasta_5_ws_lines_start_of_file',
+            'fasta_blanks_end_of_file',
+            'fasta_ws_lines_end_of_file',
+            'fasta_blank_lines_between_records',
             'fasta_3_seqs_defaults',
             'fasta_max_width_1',
             'fasta_single_bio_seq_non_defaults',
@@ -44,53 +50,61 @@ class SnifferTests(TestCase):
             'fasta_single_rna_seq_non_defaults',
             'fasta_description_newline_replacement_multi_char',
             'fasta_prot_seqs_odd_labels',
-            'fasta_single_nuc_seq_defaults',
             'fasta_single_seq',
             'fasta_id_whitespace_replacement_empty_str',
             'fasta_sequence_collection_different_type',
-            'fasta_single_nuc_seq_non_defaults',
             'fasta_id_whitespace_replacement_multi_char',
             'fasta_single_bio_seq_defaults',
             'fasta_single_prot_seq_defaults',
             'fasta_10_seqs',
             'fasta_invalid_after_10_seqs',
             'fasta_mixed_qual_scores',
-            'qual_invalid_qual_scores_float',
-            'qual_invalid_qual_scores_string'
+            'qual_3_seqs_non_defaults'
         ]))
 
         self.negative_fps = list(map(get_data_path, [
             'empty',
             'whitespace_only',
             'fasta_invalid_missing_header',
-            'fasta_invalid_blank_line',
-            'fasta_invalid_whitespace_only_line',
+            'fasta_invalid_blank_line_after_header',
+            'fasta_invalid_blank_sequence',
+            'fasta_invalid_blank_line_within_sequence',
+            'fasta_invalid_whitespace_only_line_within_sequence',
+            'fasta_invalid_whitespace_line_after_header',
             'fasta_invalid_missing_seq_data_first',
             'fasta_invalid_missing_seq_data_middle',
             'fasta_invalid_missing_seq_data_last',
             'fasta_invalid_legacy_format',
+            'fasta_invalid_whitespace_only_sequence',
             'fasta_id_whitespace_replacement_none',
             'fasta_description_newline_replacement_none',
+            'fasta_6_blanks_start_of_file',
+            'fasta_6_ws_lines_start_of_file',
             'qual_2_seqs_defaults',
             'qual_3_seqs_defaults',
             'qual_3_seqs_defaults_desc_mismatch',
             'qual_3_seqs_defaults_extra',
             'qual_3_seqs_defaults_id_mismatch',
             'qual_3_seqs_defaults_length_mismatch',
-            'qual_3_seqs_non_defaults',
             'qual_description_newline_replacement_empty_str',
             'qual_description_newline_replacement_multi_char',
             'qual_description_newline_replacement_none',
             'qual_id_whitespace_replacement_empty_str',
             'qual_id_whitespace_replacement_multi_char',
             'qual_id_whitespace_replacement_none',
-            'qual_invalid_blank_line',
+            'qual_invalid_blank_line_within_seq',
             'qual_invalid_legacy_format',
             'qual_invalid_missing_header',
             'qual_invalid_missing_qual_scores_first',
             'qual_invalid_missing_qual_scores_last',
             'qual_invalid_missing_qual_scores_middle',
-            'qual_invalid_whitespace_only_line',
+            'qual_invalid_whitespace_line_in_seq',
+            'qual_invalid_blank_line_after_header',
+            'qual_invalid_blank_sequence',
+            'qual_invalid_whitespace_only_sequence',
+            'qual_invalid_ws_line_after_header',
+            'qual_invalid_qual_scores_float',
+            'qual_invalid_qual_scores_string',
             'qual_max_width_1',
             'qual_max_width_5',
             'qual_multi_seq',
@@ -99,10 +113,17 @@ class SnifferTests(TestCase):
             'qual_sequence_collection_different_type',
             'qual_single_bio_seq_non_defaults',
             'qual_single_dna_seq_non_defaults',
-            'qual_single_nuc_seq_non_defaults',
             'qual_single_prot_seq_non_defaults',
             'qual_single_rna_seq_non_defaults',
-            'qual_single_seq'
+            'qual_single_seq',
+            'qual_ws_lines_between_records',
+            'qual_blank_lines_between_records',
+            'qual_5_blanks_start_of_file',
+            'qual_5_ws_lines_start_of_file',
+            'qual_6_blanks_start_of_file',
+            'qual_6_ws_lines_start_of_file',
+            'qual_blanks_end_of_file',
+            'qual_ws_lines_end_of_file'
         ]))
 
     def test_positives(self):
@@ -122,14 +143,17 @@ class ReaderTests(TestCase):
         # deserialize into the expected generator results
 
         # empty file shouldn't yield sequences
-        self.empty = ([], {}, list(map(get_data_path, ['empty'])),
-                      list(map(get_data_path, ['empty'])))
+        self.empty = ([], {}, list(map(get_data_path, ['empty',
+                                                       'whitespace_only'])),
+                      list(map(get_data_path, ['empty', 'whitespace_only'])))
 
         # single sequence
         self.single = (
-            [BiologicalSequence(
-                'ACGT-acgt.', id='seq1', description='desc1',
-                quality=[10, 20, 30, 10, 0, 0, 0, 88888, 1, 3456])],
+            [Sequence(
+                'ACGT-acgt.', metadata={'id': 'seq1', 'description': 'desc1'},
+                positional_metadata={'quality':
+                                     np.asarray([10, 20, 30, 10, 0, 0, 0, 255,
+                                                 1, 255], dtype=np.uint8)})],
             {},
             list(map(get_data_path, ['fasta_single_seq',
                                      'fasta_max_width_1'])),
@@ -138,24 +162,60 @@ class ReaderTests(TestCase):
 
         # multiple sequences
         self.multi = (
-            [BiologicalSequence(
-                'ACGT-acgt.', id='seq1', description='desc1',
-                quality=[10, 20, 30, 10, 0, 0, 0, 88888, 1, 3456]),
-             BiologicalSequence('A', id='_____seq__2_', quality=[42]),
-             BiologicalSequence(
-                'AACGGuA', description='desc3', quality=[0, 0, 0, 0, 0, 0, 0]),
-             BiologicalSequence('AcGtUTu', quality=[1, 2, 3, 4, 5, 6, 777]),
-             BiologicalSequence(
+            [Sequence(
+                'ACGT-acgt.', metadata={'id': 'seq1', 'description': 'desc1'},
+                positional_metadata={'quality':
+                                     np.asarray([10, 20, 30, 10, 0, 0, 0, 255,
+                                                 1, 255], dtype=np.uint8)}),
+             Sequence('A', metadata={'id': '_____seq__2_', 'description': ''},
+                      positional_metadata={'quality':
+                                           np.asarray([42], dtype=np.uint8)}),
+             Sequence(
+                'AACGGuA', metadata={'id': '', 'description': 'desc3'},
+                positional_metadata={'quality':
+                                     np.asarray([0, 0, 0, 0, 0, 0, 0],
+                                                dtype=np.uint8)}),
+             Sequence(
                 'ACGTTGCAccGG',
-                quality=[55, 10, 0, 999, 1, 1, 8, 775, 40, 10, 10, 0]),
-             BiologicalSequence('ACGUU', quality=[10, 9, 8, 7, 6]),
-             BiologicalSequence(
-                 'pQqqqPPQQQ', id='proteinseq',
-                 description='detailed description \t\twith  new  lines',
-                 quality=[42, 42, 442, 442, 42, 42, 42, 42, 42, 43])],
+                metadata={'id': '', 'description': ''},
+                positional_metadata={'quality':
+                                     np.asarray([55, 10, 0, 99, 1, 1, 8, 77,
+                                                 40, 10, 10, 0],
+                                                dtype=np.uint8)}),
+             Sequence('ACGUU',
+                      metadata={'id': '', 'description': ''},
+                      positional_metadata={'quality':
+                                           np.asarray([10, 9, 8, 7, 6],
+                                                      dtype=np.uint8)}),
+             Sequence(
+                 'pQqqqPPQQQ',
+                 metadata={'id': 'proteinseq',
+                           'description':
+                               'detailed description \t\twith  new  lines'},
+                 positional_metadata={'quality':
+                                      np.asarray([42, 42, 255, 255, 42, 42, 42,
+                                                  42, 42, 43],
+                                                 dtype=np.uint8)})],
             {},
-            list(map(get_data_path, ['fasta_multi_seq', 'fasta_max_width_5'])),
-            list(map(get_data_path, ['qual_multi_seq', 'qual_max_width_5']))
+            list(map(get_data_path, ['fasta_multi_seq', 'fasta_max_width_5',
+                                     'fasta_blank_lines_between_records',
+                                     'fasta_ws_lines_between_records',
+                                     'fasta_5_blanks_start_of_file',
+                                     'fasta_5_ws_lines_start_of_file',
+                                     'fasta_6_blanks_start_of_file',
+                                     'fasta_6_ws_lines_start_of_file',
+                                     'fasta_blanks_end_of_file',
+                                     'fasta_ws_lines_end_of_file'])),
+            list(map(get_data_path, ['qual_multi_seq', 'qual_max_width_5',
+                                     'qual_blank_lines_between_records',
+                                     'qual_ws_lines_between_records',
+                                     'qual_5_blanks_start_of_file',
+                                     'qual_5_ws_lines_start_of_file',
+                                     'qual_6_blanks_start_of_file',
+                                     'qual_6_ws_lines_start_of_file',
+                                     'qual_blanks_end_of_file',
+                                     'qual_ws_lines_end_of_file']))
+
         )
 
         # test constructor parameter, as well as odd labels (label only
@@ -167,30 +227,63 @@ class ReaderTests(TestCase):
         # exactly, only that they need to match exactly after parsing (e.g.,
         # after stripping leading/trailing whitespace from descriptions)
         self.odd_labels_different_type = (
-            [Protein('DEFQfp', quality=[0, 0, 1, 5, 44, 0]),
+            [Protein('DEFQfp',
+                     metadata={'id': '', 'description': ''},
+                     positional_metadata={'quality':
+                                          np.asarray([0, 0, 1, 5, 44, 0],
+                                                     dtype=np.uint8)},
+                     validate=False),
              Protein(
-                 'SKBI', description='skbio', quality=[1, 2, 33, 123456789])],
-            {'constructor': ProteinSequence},
+                 'SKBI', metadata={'id': '', 'description': 'skbio'},
+                 positional_metadata={'quality':
+                                      np.asarray([1, 2, 33, 123],
+                                                 dtype=np.uint8)})],
+            {'constructor': partial(Protein, validate=False)},
             list(map(get_data_path, ['fasta_prot_seqs_odd_labels'])),
             list(map(get_data_path, ['qual_prot_seqs_odd_labels']))
         )
 
         # sequences that can be loaded into a SequenceCollection or Alignment.
-        # they are also a different type than BiologicalSequence in order to
+        # they are also a different type than Sequence in order to
         # exercise the constructor parameter
         self.sequence_collection_different_type = (
-            [RNA('AUG', quality=[20, 20, 21]),
-             RNA('AUC', id='rnaseq-1', description='rnaseq desc 1',
-                 quality=[10, 9, 10]),
-             RNA('AUG', id='rnaseq-2', description='rnaseq desc 2',
-                 quality=[9, 99, 999])],
-            {'constructor': RNA},
+            [RNA('aUG',
+                 metadata={'id': '', 'description': ''},
+                 positional_metadata={'quality':
+                                      np.asarray([20, 20, 21],
+                                                 dtype=np.uint8)},
+                 lowercase='introns'),
+             RNA('AuC',
+                 metadata={'id': 'rnaseq-1', 'description': 'rnaseq desc 1'},
+                 positional_metadata={'quality':
+                                      np.asarray([10, 9, 10], dtype=np.uint8)},
+                 lowercase='introns'),
+             RNA('AUg',
+                 metadata={'id': 'rnaseq-2', 'description': 'rnaseq desc 2'},
+                 positional_metadata={'quality':
+                                      np.asarray([9, 99, 99], dtype=np.uint8)},
+                 lowercase='introns')],
+            {'constructor': partial(RNA, lowercase='introns')},
             list(map(get_data_path,
                      ['fasta_sequence_collection_different_type'])),
             list(map(get_data_path,
                      ['qual_sequence_collection_different_type']))
         )
 
+        self.lowercase_seqs = (
+            [DNA('TAcg',
+                 metadata={'id': 'f-o-o', 'description': 'b_a_r'},
+                 positional_metadata={'quality':
+                                      np.asarray([0, 1, 2, 3],
+                                                 dtype=np.uint8)},
+                 lowercase='introns')],
+            {'constructor': DNA, 'lowercase': 'introns'},
+            list(map(get_data_path,
+                     ['fasta_single_dna_seq_non_defaults'])),
+            list(map(get_data_path,
+                     ['qual_single_dna_seq_non_defaults']))
+        )
+
         # store fasta filepath, kwargs, error type, and expected error message
         # for invalid input.
         #
@@ -202,40 +295,61 @@ class ReaderTests(TestCase):
         # fasta remains in python)
         self.invalid_fps = list(map(lambda e: (get_data_path(e[0]),
                                                e[1], e[2], e[3]), [
-            # whitespace-only fasta and qual
-            ('whitespace_only', {}, FASTAFormatError,
-             'without a header.*FASTA'),
-            ('fasta_3_seqs_defaults',
-             {'qual': get_data_path('whitespace_only')}, FASTAFormatError,
-             'without a header.*QUAL'),
-
             # fasta and qual missing header
             ('fasta_invalid_missing_header', {}, FASTAFormatError,
-             'without a header.*FASTA'),
+             'non-header.*1st'),
             ('fasta_3_seqs_defaults',
              {'qual': get_data_path('qual_invalid_missing_header')},
-             FASTAFormatError, 'without a header.*QUAL'),
+             QUALFormatError, 'non-header.*1st'),
 
-            # fasta and qual with blank line
-            ('fasta_invalid_blank_line', {}, FASTAFormatError,
-             'whitespace-only.*FASTA'),
+            # fasta and qual with blank line within sequence
+            ('fasta_invalid_blank_line_within_sequence', {}, FASTAFormatError,
+             'whitespace-only'),
             ('fasta_3_seqs_defaults',
-             {'qual': get_data_path('qual_invalid_blank_line')},
-             FASTAFormatError, 'whitespace-only.*QUAL'),
+             {'qual': get_data_path('qual_invalid_blank_line_within_seq')},
+             QUALFormatError, 'whitespace-only'),
 
-            # fasta and qual with whitespace-only line
-            ('fasta_invalid_whitespace_only_line', {}, FASTAFormatError,
-             'whitespace-only.*FASTA'),
+            # fasta and qual with blank after header
+            ('fasta_invalid_blank_sequence', {}, FASTAFormatError,
+             'without sequence data'),
             ('fasta_3_seqs_defaults',
-             {'qual': get_data_path('qual_invalid_whitespace_only_line')},
-             FASTAFormatError, 'whitespace-only.*QUAL'),
+             {'qual': get_data_path('qual_invalid_blank_sequence')},
+             QUALFormatError, 'without quality scores'),
+
+            # fasta and qual with whitespace only sequence
+            ('fasta_invalid_whitespace_only_sequence', {}, FASTAFormatError,
+             'without sequence data'),
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('qual_invalid_whitespace_only_sequence')},
+             QUALFormatError, 'without quality scores'),
+
+            # fasta and qual with blank line within sequence
+            ('fasta_invalid_blank_line_after_header', {}, FASTAFormatError,
+             'whitespace-only'),
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('qual_invalid_blank_line_after_header')},
+             QUALFormatError, 'whitespace-only'),
+
+            # fasta and qual with whitespace-only line within sequence
+            ('fasta_invalid_whitespace_only_line_within_sequence',
+             {}, FASTAFormatError, 'whitespace-only'),
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('qual_invalid_whitespace_line_in_seq')},
+             QUALFormatError, 'whitespace-only'),
+
+            # fasta and qual with whitespace-only line after header
+            ('fasta_invalid_whitespace_line_after_header',
+             {}, FASTAFormatError, 'whitespace-only'),
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('qual_invalid_ws_line_after_header')},
+             QUALFormatError, 'whitespace-only'),
 
             # fasta and qual missing record data (first record)
             ('fasta_invalid_missing_seq_data_first', {}, FASTAFormatError,
              'without sequence data'),
             ('fasta_3_seqs_defaults',
              {'qual': get_data_path('qual_invalid_missing_qual_scores_first')},
-             FASTAFormatError, 'without quality scores'),
+             QUALFormatError, 'without quality scores'),
 
             # fasta and qual missing record data (middle record)
             ('fasta_invalid_missing_seq_data_middle', {}, FASTAFormatError,
@@ -243,21 +357,21 @@ class ReaderTests(TestCase):
             ('fasta_3_seqs_defaults',
              {'qual':
               get_data_path('qual_invalid_missing_qual_scores_middle')},
-             FASTAFormatError, 'without quality scores'),
+             QUALFormatError, 'without quality scores'),
 
             # fasta and qual missing record data (last record)
             ('fasta_invalid_missing_seq_data_last', {}, FASTAFormatError,
              'without sequence data'),
             ('fasta_3_seqs_defaults',
              {'qual': get_data_path('qual_invalid_missing_qual_scores_last')},
-             FASTAFormatError, 'without quality scores'),
+             QUALFormatError, 'without quality scores'),
 
             # fasta and qual in legacy format (;)
             ('fasta_invalid_legacy_format', {}, FASTAFormatError,
-             'without a header.*FASTA'),
+             'non-header.*1st'),
             ('fasta_3_seqs_defaults',
              {'qual': get_data_path('qual_invalid_legacy_format')},
-             FASTAFormatError, 'without a header.*QUAL'),
+             QUALFormatError, 'non-header.*1st'),
 
             # qual file with an extra record
             ('fasta_3_seqs_defaults',
@@ -284,34 +398,41 @@ class ReaderTests(TestCase):
             # sequence and quality score length mismatch between fasta and qual
             ('fasta_3_seqs_defaults',
              {'qual': get_data_path('qual_3_seqs_defaults_length_mismatch')},
-             BiologicalSequenceError,
-             'Number of Phred quality scores \(3\).*\(4\)'),
+             ValueError,
+             'Number of positional metadata values \(3\) must match the '
+             'number of characters in the sequence \(4\)\.'),
 
             # invalid qual scores (string value can't be converted to integer)
             ('fasta_3_seqs_defaults',
              {'qual': get_data_path('qual_invalid_qual_scores_string')},
-             FASTAFormatError,
+             QUALFormatError,
              'quality scores to integers:\n100 0 1a -42'),
 
             # invalid qual scores (float value can't be converted to integer)
             ('fasta_3_seqs_defaults',
              {'qual': get_data_path('qual_invalid_qual_scores_float')},
-             FASTAFormatError,
+             QUALFormatError,
              'quality scores to integers:\n42    41.0 39 40'),
 
             # invalid qual scores (negative integer)
             ('fasta_3_seqs_defaults',
              {'qual': get_data_path('qual_invalid_qual_scores_negative')},
-             BiologicalSequenceError,
-             'Phred quality scores.*greater than or equal to zero'),
+             QUALFormatError,
+             'Quality scores must be greater than or equal to zero\.'),
+
+            # invalid qual scores (over 255)
+            ('fasta_3_seqs_defaults',
+             {'qual': get_data_path('qual_invalid_qual_scores_over_255')},
+             QUALFormatError,
+             'quality score\(s\) greater than 255'),
 
             # misc. invalid files used elsewhere in the tests
             ('fasta_invalid_after_10_seqs', {}, FASTAFormatError,
              'without sequence data'),
             ('fasta_id_whitespace_replacement_none', {}, FASTAFormatError,
-             'whitespace-only.*FASTA'),
+             'whitespace-only'),
             ('fasta_description_newline_replacement_none', {},
-             FASTAFormatError, 'whitespace-only.*FASTA')
+             FASTAFormatError, 'whitespace-only')
         ]))
 
     # extensive tests for fasta -> generator reader since it is used by all
@@ -320,7 +441,8 @@ class ReaderTests(TestCase):
     def test_fasta_to_generator_valid_files(self):
         test_cases = (self.empty, self.single, self.multi,
                       self.odd_labels_different_type,
-                      self.sequence_collection_different_type)
+                      self.sequence_collection_different_type,
+                      self.lowercase_seqs)
 
         # Strategy:
         #   for each fasta file, read it without its corresponding qual file,
@@ -333,10 +455,11 @@ class ReaderTests(TestCase):
         for exp, kwargs, fasta_fps, qual_fps in test_cases:
             for fasta_fp in fasta_fps:
                 obs = list(_fasta_to_generator(fasta_fp, **kwargs))
-
                 self.assertEqual(len(obs), len(exp))
                 for o, e in zip(obs, exp):
-                    self.assertTrue(o.equals(e, ignore=['quality']))
+                    e = e.copy()
+                    del e.positional_metadata['quality']
+                    self.assertEqual(o, e)
 
                 for qual_fp in qual_fps:
                     obs = list(_fasta_to_generator(fasta_fp, qual=qual_fp,
@@ -344,11 +467,11 @@ class ReaderTests(TestCase):
 
                     self.assertEqual(len(obs), len(exp))
                     for o, e in zip(obs, exp):
-                        self.assertTrue(o.equals(e))
+                        self.assertEqual(o, e)
 
     def test_fasta_to_generator_invalid_files(self):
         for fp, kwargs, error_type, error_msg_regex in self.invalid_fps:
-            with self.assertRaisesRegexp(error_type, error_msg_regex):
+            with six.assertRaisesRegex(self, error_type, error_msg_regex):
                 list(_fasta_to_generator(fp, **kwargs))
 
     # light testing of fasta -> object readers to ensure interface is present
@@ -356,22 +479,28 @@ class ReaderTests(TestCase):
     # performed above
 
     def test_fasta_to_any_sequence(self):
-        for constructor, reader_fn in ((BiologicalSequence,
+        for constructor, reader_fn in ((Sequence,
                                         _fasta_to_biological_sequence),
-                                       (NucleotideSequence,
-                                        _fasta_to_nucleotide_sequence),
-                                       (DNA,
-                                        _fasta_to_dna_sequence),
-                                       (RNA,
-                                        _fasta_to_rna_sequence),
-                                       (Protein,
-                                        _fasta_to_protein_sequence)):
+                                       (partial(DNA, validate=False,
+                                                lowercase='introns'),
+                                        partial(_fasta_to_dna_sequence,
+                                                validate=False,
+                                                lowercase='introns')),
+                                       (partial(RNA, validate=False,
+                                                lowercase='introns'),
+                                        partial(_fasta_to_rna_sequence,
+                                                validate=False,
+                                                lowercase='introns')),
+                                       (partial(Protein, lowercase='introns'),
+                                        partial(_fasta_to_protein_sequence,
+                                                validate=False,
+                                                lowercase='introns'))):
 
             # empty file
             empty_fp = get_data_path('empty')
-            with self.assertRaisesRegexp(ValueError, '1st sequence'):
+            with six.assertRaisesRegex(self, ValueError, '1st sequence'):
                 reader_fn(empty_fp)
-            with self.assertRaisesRegexp(ValueError, '1st sequence'):
+            with six.assertRaisesRegex(self, ValueError, '1st sequence'):
                 reader_fn(empty_fp, qual=empty_fp)
 
             # the sequences in the following files don't necessarily make sense
@@ -389,17 +518,21 @@ class ReaderTests(TestCase):
                                  ['fasta_single_seq', 'fasta_max_width_1']))
             for fasta_fp in fasta_fps:
                 exp = constructor(
-                    'ACGT-acgt.', id='seq1', description='desc1',
-                    quality=[10, 20, 30, 10, 0, 0, 0, 88888, 1, 3456])
+                    'ACGT-acgt.',
+                    metadata={'id': 'seq1', 'description': 'desc1'})
 
                 obs = reader_fn(fasta_fp)
-                self.assertTrue(obs.equals(exp, ignore=['quality']))
+                self.assertEqual(obs, exp)
 
+                exp.positional_metadata.insert(
+                    0, 'quality',
+                    np.asarray([10, 20, 30, 10, 0, 0, 0, 255, 1, 255],
+                               dtype=np.uint8))
                 qual_fps = list(map(get_data_path,
                                     ['qual_single_seq', 'qual_max_width_1']))
                 for qual_fp in qual_fps:
                     obs = reader_fn(fasta_fp, qual=qual_fp)
-                    self.assertTrue(obs.equals(exp))
+                    self.assertEqual(obs, exp)
 
             # file with multiple seqs
             fasta_fps = list(map(get_data_path,
@@ -409,56 +542,73 @@ class ReaderTests(TestCase):
             for fasta_fp in fasta_fps:
                 # get first
                 exp = constructor(
-                    'ACGT-acgt.', id='seq1', description='desc1',
-                    quality=[10, 20, 30, 10, 0, 0, 0, 88888, 1, 3456])
+                    'ACGT-acgt.',
+                    metadata={'id': 'seq1', 'description': 'desc1'})
 
                 obs = reader_fn(fasta_fp)
-                self.assertTrue(obs.equals(exp, ignore=['quality']))
+                self.assertEqual(obs, exp)
 
+                exp.positional_metadata.insert(
+                    0, 'quality',
+                    np.asarray([10, 20, 30, 10, 0, 0, 0, 255, 1, 255],
+                               dtype=np.uint8))
                 for qual_fp in qual_fps:
                     obs = reader_fn(fasta_fp, qual=qual_fp)
-                    self.assertTrue(obs.equals(exp))
+                    self.assertEqual(obs, exp)
 
                 # get middle
-                exp = constructor('AcGtUTu', quality=[1, 2, 3, 4, 5, 6, 777])
+                exp = constructor('ACGTTGCAccGG',
+                                  metadata={'id': '', 'description': ''})
 
                 obs = reader_fn(fasta_fp, seq_num=4)
-                self.assertTrue(obs.equals(exp, ignore=['quality']))
+                self.assertEqual(obs, exp)
 
+                exp.positional_metadata.insert(
+                    0, 'quality',
+                    np.asarray([55, 10, 0, 99, 1, 1, 8, 77, 40, 10, 10, 0],
+                               dtype=np.uint8))
                 for qual_fp in qual_fps:
                     obs = reader_fn(fasta_fp, seq_num=4, qual=qual_fp)
-                    self.assertTrue(obs.equals(exp))
+                    self.assertEqual(obs, exp)
 
                 # get last
                 exp = constructor(
-                    'pQqqqPPQQQ', id='proteinseq',
-                    description='detailed description \t\twith  new  lines',
-                    quality=[42, 42, 442, 442, 42, 42, 42, 42, 42, 43])
-
-                obs = reader_fn(fasta_fp, seq_num=7)
-                self.assertTrue(obs.equals(exp, ignore=['quality']))
-
+                    'pQqqqPPQQQ',
+                    metadata={'id': 'proteinseq',
+                              'description':
+                                  'detailed description \t\twith  new  lines'})
+
+                obs = reader_fn(fasta_fp, seq_num=6)
+                self.assertEqual(obs, exp)
+
+                exp.positional_metadata.insert(
+                    0, 'quality',
+                    np.asarray([42, 42, 255, 255, 42, 42, 42, 42, 42, 43],
+                               dtype=np.uint8))
                 for qual_fp in qual_fps:
-                    obs = reader_fn(fasta_fp, seq_num=7, qual=qual_fp)
-                    self.assertTrue(obs.equals(exp))
+                    obs = reader_fn(fasta_fp, seq_num=6, qual=qual_fp)
+                    self.assertEqual(obs, exp)
 
                 # seq_num too large
-                with self.assertRaisesRegexp(ValueError, '8th sequence'):
+                with six.assertRaisesRegex(self, ValueError, '8th sequence'):
                     reader_fn(fasta_fp, seq_num=8)
                 for qual_fp in qual_fps:
-                    with self.assertRaisesRegexp(ValueError, '8th sequence'):
+                    with six.assertRaisesRegex(self, ValueError,
+                                               '8th sequence'):
                         reader_fn(fasta_fp, seq_num=8, qual=qual_fp)
 
                 # seq_num too small
-                with self.assertRaisesRegexp(ValueError, '`seq_num`=0'):
+                with six.assertRaisesRegex(self, ValueError, '`seq_num`=0'):
                     reader_fn(fasta_fp, seq_num=0)
                 for qual_fp in qual_fps:
-                    with self.assertRaisesRegexp(ValueError, '`seq_num`=0'):
+                    with six.assertRaisesRegex(self, ValueError,
+                                               '`seq_num`=0'):
                         reader_fn(fasta_fp, seq_num=0, qual=qual_fp)
 
     def test_fasta_to_sequence_collection_and_alignment(self):
         test_cases = (self.empty, self.single,
-                      self.sequence_collection_different_type)
+                      self.sequence_collection_different_type,
+                      self.lowercase_seqs)
 
         for constructor, reader_fn in ((SequenceCollection,
                                         _fasta_to_sequence_collection),
@@ -472,55 +622,66 @@ class ReaderTests(TestCase):
                 for fasta_fp in fasta_fps:
                     obs = reader_fn(fasta_fp, **kwargs)
 
-                    # TODO remove this custom equality testing code when
-                    # SequenceCollection has an equals method (part of #656).
-                    # We need this method to include IDs and description in the
-                    # comparison (not part of SequenceCollection.__eq__).
-                    self.assertEqual(obs, exp)
+                    self.assertEqual(len(obs), len(exp))
                     for o, e in zip(obs, exp):
-                        self.assertTrue(o.equals(e, ignore=['quality']))
+                        e = e.copy()
+                        del e.positional_metadata['quality']
+                        self.assertEqual(o, e)
 
                     for qual_fp in qual_fps:
                         obs = reader_fn(fasta_fp, qual=qual_fp, **kwargs)
-
-                        # TODO remove this custom equality testing code when
-                        # SequenceCollection has an equals method (part of
-                        # #656). We need this method to include IDs and
-                        # description in the comparison (not part of
-                        # SequenceCollection.__eq__).
                         self.assertEqual(obs, exp)
-                        for o, e in zip(obs, exp):
-                            self.assertTrue(o.equals(e))
 
 
 class WriterTests(TestCase):
     def setUp(self):
-        self.bio_seq1 = BiologicalSequence(
-            'ACGT-acgt.', id='seq1', description='desc1',
-            quality=[10, 20, 30, 10, 0, 0, 0, 88888, 1, 3456])
-        self.bio_seq2 = BiologicalSequence(
-            'A', id=' \n  \nseq \t2 ', quality=[42])
-        self.bio_seq3 = BiologicalSequence(
-            'AACGGuA', description='desc3', quality=[0, 0, 0, 0, 0, 0, 0])
-        self.nuc_seq = NucleotideSequence(
-            'AcGtUTu', quality=[1, 2, 3, 4, 5, 6, 777])
+        self.bio_seq1 = DNA(
+            'ACGT-acgt.',
+            metadata={'id': 'seq1', 'description': 'desc1'},
+            positional_metadata={'quality': [10, 20, 30, 10, 0, 0, 0, 255,
+                                             1, 255]},
+            lowercase='introns')
+        self.bio_seq2 = DNA(
+            'A',
+            metadata={'id': ' \n  \nseq \t2 '},
+            positional_metadata={'quality': [42]},
+            lowercase='introns')
+        self.bio_seq3 = RNA(
+            'AACGGuA',
+            metadata={'description': 'desc3'},
+            positional_metadata={'quality': [0, 0, 0, 0, 0, 0, 0]},
+            lowercase='introns')
         self.dna_seq = DNA(
             'ACGTTGCAccGG',
-            quality=[55, 10, 0, 999, 1, 1, 8, 775, 40, 10, 10, 0])
-        self.rna_seq = RNA('ACGUU', quality=[10, 9, 8, 7, 6])
+            positional_metadata={'quality': [55, 10, 0, 99, 1, 1, 8, 77, 40,
+                                             10, 10, 0]},
+            lowercase='introns')
+        self.rna_seq = RNA('ACGUU',
+                           positional_metadata={'quality': [10, 9, 8, 7, 6]},
+                           lowercase='introns')
         self.prot_seq = Protein(
-            'pQqqqPPQQQ', id='proteinseq',
-            description='\ndetailed\ndescription \t\twith  new\n\nlines\n\n\n',
-            quality=[42, 42, 442, 442, 42, 42, 42, 42, 42, 43])
+            'pQqqqPPQQQ',
+            metadata={'id': 'proteinseq',
+                      'description': "\ndetailed\ndescription \t\twith "
+                                     " new\n\nlines\n\n\n"},
+            positional_metadata={'quality': [42, 42, 255, 255, 42, 42, 42, 42,
+                                             42, 43]},
+            lowercase='introns')
 
         seqs = [
-            RNA('UUUU', id='s\te\tq\t1', description='desc\n1',
-                quality=[1234, 0, 0, 2]),
-            BiologicalSequence(
-                'CATC', id='s\te\tq\t2', description='desc\n2',
-                quality=[1, 11, 111, 11112]),
-            Protein('sits', id='s\te\tq\t3', description='desc\n3',
-                    quality=[12345, 678909, 999999, 4242424242])
+            RNA('UUUU',
+                metadata={'id': 's\te\tq\t1', 'description': 'desc\n1'},
+                positional_metadata={'quality': [1234, 0, 0, 2]},
+                lowercase='introns'),
+            Sequence(
+                'CATC',
+                metadata={'id': 's\te\tq\t2', 'description': 'desc\n2'},
+                positional_metadata={'quality': [1, 11, 111, 11112]}),
+            Protein('sits',
+                    metadata={'id': 's\te\tq\t3', 'description': 'desc\n3'},
+                    positional_metadata={'quality': [12345, 678909, 999999,
+                                                     4242424242]},
+                    validate=False)
         ]
         self.seq_coll = SequenceCollection(seqs)
         self.align = Alignment(seqs)
@@ -536,15 +697,17 @@ class WriterTests(TestCase):
         # description_newline_replacement)
         def newline_description_gen():
             yield self.prot_seq
-            yield DNA('AGGAGAATA', id='foo', description='\n\n\n\n',
-                      quality=range(9))
+            yield DNA('AGGAGAATA',
+                      metadata={'id': 'foo', 'description': '\n\n\n\n'},
+                      positional_metadata={'quality': range(9)},
+                      lowercase='introns')
 
         # generate sequences with ids containing whitespace (to test
         # id_whitespace_replacement)
         def whitespace_id_gen():
             yield self.bio_seq2
-            yield RNA('UA', id='\n\t \t', description='a\nb',
-                      quality=[1000, 1])
+            yield RNA('UA', metadata={'id': '\n\t \t', 'description': 'a\nb'},
+                      positional_metadata={'quality': [1000, 1]})
 
         # multiple sequences of mixed types, lengths, and metadata. lengths are
         # chosen to exercise various splitting cases when testing max_width,
@@ -552,15 +715,16 @@ class WriterTests(TestCase):
         # sequence data vs. quality scores
         def multi_seq_gen():
             for seq in (self.bio_seq1, self.bio_seq2, self.bio_seq3,
-                        self.nuc_seq, self.dna_seq, self.rna_seq,
-                        self.prot_seq):
+                        self.dna_seq, self.rna_seq, self.prot_seq):
                 yield seq
 
         # can be serialized if no qual file is provided, else it should raise
         # an error because one seq has qual scores and the other doesn't
         def mixed_qual_score_gen():
-            missing_qual_seq = BiologicalSequence(
-                'AAAAT', id='da,dadadada', description='10 hours')
+            missing_qual_seq = DNA(
+                'AAAAT', metadata={'id': 'da,dadadada',
+                                   'description': '10 hours'},
+                lowercase='introns')
             for seq in self.bio_seq1, missing_qual_seq:
                 yield seq
 
@@ -571,32 +735,39 @@ class WriterTests(TestCase):
         self.objs_fps = list(map(lambda e: (e[0], e[1], get_data_path(e[2]),
                                             get_data_path(e[3])), [
             (empty_gen(), {}, 'empty', 'empty'),
-            (single_seq_gen(), {}, 'fasta_single_seq', 'qual_single_seq'),
+            (single_seq_gen(), {'lowercase': 'introns'}, 'fasta_single_seq',
+             'qual_single_seq'),
 
             # no splitting of sequence or qual data across lines b/c max_width
             # is sufficiently large
-            (single_seq_gen(), {'max_width': 32}, 'fasta_single_seq',
+            (single_seq_gen(), {'max_width': 32, 'lowercase': 'introns'},
+             'fasta_single_seq',
              'qual_single_seq'),
 
             # splitting algorithm for sequence and qual scores is different;
             # make sure individual qual scores aren't split across lines even
             # if they exceed max_width
-            (single_seq_gen(), {'max_width': 1}, 'fasta_max_width_1',
+            (single_seq_gen(), {'max_width': 1, 'lowercase': 'introns'},
+             'fasta_max_width_1',
              'qual_max_width_1'),
-
-            (multi_seq_gen(), {}, 'fasta_multi_seq', 'qual_multi_seq'),
-            (multi_seq_gen(), {'max_width': 5}, 'fasta_max_width_5',
+            (multi_seq_gen(),
+             {'lowercase': 'introns'}, 'fasta_multi_seq', 'qual_multi_seq'),
+            (multi_seq_gen(),
+             {'max_width': 5, 'lowercase': 'introns'}, 'fasta_max_width_5',
              'qual_max_width_5'),
             (newline_description_gen(),
-             {'description_newline_replacement': ':-)'},
+             {'description_newline_replacement': ':-)',
+              'lowercase': 'introns'},
              'fasta_description_newline_replacement_multi_char',
              'qual_description_newline_replacement_multi_char'),
             (newline_description_gen(),
-             {'description_newline_replacement': ''},
+             {'description_newline_replacement': '',
+              'lowercase': 'introns'},
              'fasta_description_newline_replacement_empty_str',
              'qual_description_newline_replacement_empty_str',),
             (newline_description_gen(),
-             {'description_newline_replacement': None},
+             {'description_newline_replacement': None,
+              'lowercase': 'introns'},
              'fasta_description_newline_replacement_none',
              'qual_description_newline_replacement_none'),
             (whitespace_id_gen(),
@@ -614,7 +785,7 @@ class WriterTests(TestCase):
         ]))
 
         def blank_seq_gen():
-            for seq in self.bio_seq1, BiologicalSequence(''):
+            for seq in self.bio_seq1, Sequence(''):
                 yield seq
 
         # generators or parameter combos that cannot be written in fasta
@@ -628,7 +799,7 @@ class WriterTests(TestCase):
              ValueError, 'Newline character'),
             (multi_seq_gen(), {'description_newline_replacement': '-.-\n'},
              ValueError, 'Newline character'),
-            (mixed_qual_score_gen(), {'qual': StringIO()}, ValueError,
+            (mixed_qual_score_gen(), {'qual': io.StringIO()}, ValueError,
              '2nd sequence.*does not have quality scores')
         ]
 
@@ -638,25 +809,24 @@ class WriterTests(TestCase):
     def test_generator_to_fasta_no_qual(self):
         # test writing standalone fasta (i.e., without a qual file)
         for obj, kwargs, fp, _ in self.objs_fps:
-            fh = StringIO()
+            fh = io.StringIO()
             _generator_to_fasta(obj, fh, **kwargs)
             obs = fh.getvalue()
             fh.close()
 
-            with open(fp, 'U') as fh:
+            with io.open(fp) as fh:
                 exp = fh.read()
-
             self.assertEqual(obs, exp)
 
     def test_generator_to_fasta_mixed_qual_scores(self):
         # test writing some sequences with qual scores and some without is
         # possible if no qual output file is specified
-        fh = StringIO()
-        _generator_to_fasta(self.mixed_qual_score_gen, fh)
+        fh = io.StringIO()
+        _generator_to_fasta(self.mixed_qual_score_gen, fh, lowercase='introns')
         obs = fh.getvalue()
         fh.close()
 
-        with open(get_data_path('fasta_mixed_qual_scores'), 'U') as fh:
+        with io.open(get_data_path('fasta_mixed_qual_scores')) as fh:
             exp = fh.read()
 
         self.assertEqual(obs, exp)
@@ -665,17 +835,17 @@ class WriterTests(TestCase):
         # test writing fasta and qual files
         for obj, kwargs, fasta_fp, qual_fp in self.objs_fps:
             if qual_fp is not None:
-                fasta_fh = StringIO()
-                qual_fh = StringIO()
+                fasta_fh = io.StringIO()
+                qual_fh = io.StringIO()
                 _generator_to_fasta(obj, fasta_fh, qual=qual_fh, **kwargs)
                 obs_fasta = fasta_fh.getvalue()
                 obs_qual = qual_fh.getvalue()
                 fasta_fh.close()
                 qual_fh.close()
 
-                with open(fasta_fp, 'U') as fh:
+                with io.open(fasta_fp) as fh:
                     exp_fasta = fh.read()
-                with open(qual_fp, 'U') as fh:
+                with io.open(qual_fp) as fh:
                     exp_qual = fh.read()
 
                 self.assertEqual(obs_fasta, exp_fasta)
@@ -683,15 +853,25 @@ class WriterTests(TestCase):
 
     def test_generator_to_fasta_invalid_input(self):
         for obj, kwargs, error_type, error_msg_regexp in self.invalid_objs:
-            fh = StringIO()
-            with self.assertRaisesRegexp(error_type, error_msg_regexp):
+            fh = io.StringIO()
+            with six.assertRaisesRegex(self, error_type, error_msg_regexp):
                 _generator_to_fasta(obj, fh, **kwargs)
             fh.close()
 
+    def test_generator_to_fasta_sequence_lowercase_exception(self):
+        seq = Sequence('ACgt', metadata={'id': ''})
+        fh = io.StringIO()
+        with six.assertRaisesRegex(self, AttributeError,
+                                   "lowercase specified but class Sequence "
+                                   "does not support lowercase "
+                                   "functionality"):
+            _generator_to_fasta(SequenceCollection([seq]), fh,
+                                lowercase='introns')
+        fh.close()
+
     # light testing of object -> fasta writers to ensure interface is present
     # and kwargs are passed through. extensive testing of underlying writer is
     # performed above
-
     def test_any_sequence_to_fasta(self):
         # store writer function, sequence object to write, expected
         # fasta filepath for default parameters, expected fasta filepath for
@@ -700,29 +880,29 @@ class WriterTests(TestCase):
         desc = 'b\na\nr'
         test_data = (
             (_biological_sequence_to_fasta,
-             BiologicalSequence('ACGT', id=id_, description=desc,
-                                quality=range(1, 5)),
+             Sequence('ACgt', metadata={'id': id_, 'description': desc},
+                      positional_metadata={'quality': range(1, 5)}),
              ('fasta_single_bio_seq_defaults',
               'fasta_single_bio_seq_non_defaults',
               'qual_single_bio_seq_non_defaults')),
-            (_nucleotide_sequence_to_fasta,
-             NucleotideSequence('ACGTU', id=id_, description=desc,
-                                quality=range(5)),
-             ('fasta_single_nuc_seq_defaults',
-              'fasta_single_nuc_seq_non_defaults',
-              'qual_single_nuc_seq_non_defaults')),
-            (_dna_sequence_to_fasta,
-             DNA('TACG', id=id_, description=desc, quality=range(4)),
+            (partial(_dna_sequence_to_fasta, lowercase='introns'),
+             DNA('TAcg', metadata={'id': id_, 'description': desc},
+                 positional_metadata={'quality': range(4)},
+                 lowercase='introns'),
              ('fasta_single_dna_seq_defaults',
               'fasta_single_dna_seq_non_defaults',
               'qual_single_dna_seq_non_defaults')),
-            (_rna_sequence_to_fasta,
-             RNA('UACG', id=id_, description=desc, quality=range(2, 6)),
+            (partial(_rna_sequence_to_fasta, lowercase='introns'),
+             RNA('uaCG', metadata={'id': id_, 'description': desc},
+                 positional_metadata={'quality': range(2, 6)},
+                 lowercase='introns'),
              ('fasta_single_rna_seq_defaults',
               'fasta_single_rna_seq_non_defaults',
               'qual_single_rna_seq_non_defaults')),
-            (_protein_sequence_to_fasta,
-             Protein('PQQ', id=id_, description=desc, quality=[42, 41, 40]),
+            (partial(_protein_sequence_to_fasta, lowercase='introns'),
+             Protein('PqQ', metadata={'id': id_, 'description': desc},
+                     positional_metadata={'quality': [42, 41, 40]},
+                     lowercase='introns'),
              ('fasta_single_prot_seq_defaults',
               'fasta_single_prot_seq_non_defaults',
               'qual_single_prot_seq_non_defaults')))
@@ -731,19 +911,19 @@ class WriterTests(TestCase):
             defaults_fp, non_defaults_fasta_fp, non_defaults_qual_fp = fps
 
             # test writing with default parameters
-            fh = StringIO()
+            fh = io.StringIO()
             fn(obj, fh)
             obs = fh.getvalue()
             fh.close()
 
-            with open(get_data_path(defaults_fp), 'U') as fh:
+            with io.open(get_data_path(defaults_fp)) as fh:
                 exp = fh.read()
 
             self.assertEqual(obs, exp)
 
             # test writing with non-defaults
-            fasta_fh = StringIO()
-            qual_fh = StringIO()
+            fasta_fh = io.StringIO()
+            qual_fh = io.StringIO()
             fn(obj, fasta_fh, id_whitespace_replacement='-',
                description_newline_replacement='_', max_width=1, qual=qual_fh)
             obs_fasta = fasta_fh.getvalue()
@@ -751,9 +931,9 @@ class WriterTests(TestCase):
             fasta_fh.close()
             qual_fh.close()
 
-            with open(get_data_path(non_defaults_fasta_fp), 'U') as fh:
+            with io.open(get_data_path(non_defaults_fasta_fp)) as fh:
                 exp_fasta = fh.read()
-            with open(get_data_path(non_defaults_qual_fp), 'U') as fh:
+            with io.open(get_data_path(non_defaults_qual_fp)) as fh:
                 exp_qual = fh.read()
 
             self.assertEqual(obs_fasta, exp_fasta)
@@ -763,19 +943,19 @@ class WriterTests(TestCase):
         for fn, obj in ((_sequence_collection_to_fasta, self.seq_coll),
                         (_alignment_to_fasta, self.align)):
             # test writing with default parameters
-            fh = StringIO()
+            fh = io.StringIO()
             fn(obj, fh)
             obs = fh.getvalue()
             fh.close()
 
-            with open(get_data_path('fasta_3_seqs_defaults'), 'U') as fh:
+            with io.open(get_data_path('fasta_3_seqs_defaults')) as fh:
                 exp = fh.read()
 
             self.assertEqual(obs, exp)
 
             # test writing with non-defaults
-            fasta_fh = StringIO()
-            qual_fh = StringIO()
+            fasta_fh = io.StringIO()
+            qual_fh = io.StringIO()
             fn(obj, fasta_fh, id_whitespace_replacement='*',
                description_newline_replacement='+', max_width=3, qual=qual_fh)
             obs_fasta = fasta_fh.getvalue()
@@ -783,14 +963,34 @@ class WriterTests(TestCase):
             fasta_fh.close()
             qual_fh.close()
 
-            with open(get_data_path('fasta_3_seqs_non_defaults'), 'U') as fh:
+            with io.open(get_data_path('fasta_3_seqs_non_defaults')) as fh:
                 exp_fasta = fh.read()
-            with open(get_data_path('qual_3_seqs_non_defaults'), 'U') as fh:
+            with io.open(get_data_path('qual_3_seqs_non_defaults')) as fh:
                 exp_qual = fh.read()
 
             self.assertEqual(obs_fasta, exp_fasta)
             self.assertEqual(obs_qual, exp_qual)
 
+            fh2 = io.StringIO()
+            with six.assertRaisesRegex(self, AttributeError,
+                                       "lowercase specified but class "
+                                       "Sequence does not support lowercase "
+                                       "functionality"):
+                fn(obj, fh2, lowercase='introns')
+            fh2.close()
+
+            fasta_fh2 = io.StringIO()
+            qual_fh2 = io.StringIO()
+            with six.assertRaisesRegex(self, AttributeError,
+                                       "lowercase specified but class "
+                                       "Sequence does not support lowercase "
+                                       "functionality"):
+                fn(obj, fasta_fh2, id_whitespace_replacement='*',
+                   description_newline_replacement='+', max_width=3,
+                   qual=qual_fh2, lowercase='introns')
+            fasta_fh2.close()
+            qual_fh2.close()
+
 
 class RoundtripTests(TestCase):
     def test_roundtrip_generators(self):
@@ -802,13 +1002,13 @@ class RoundtripTests(TestCase):
                          'qual_multi_seq_roundtrip')]))
 
         for fasta_fp, qual_fp in fps:
-            with open(fasta_fp, 'U') as fh:
+            with io.open(fasta_fp) as fh:
                 exp_fasta = fh.read()
-            with open(qual_fp, 'U') as fh:
+            with io.open(qual_fp) as fh:
                 exp_qual = fh.read()
 
-            fasta_fh = StringIO()
-            qual_fh = StringIO()
+            fasta_fh = io.StringIO()
+            qual_fh = io.StringIO()
             _generator_to_fasta(_fasta_to_generator(fasta_fp, qual=qual_fp),
                                 fasta_fh, qual=qual_fh)
             obs_fasta = fasta_fh.getvalue()
@@ -834,8 +1034,8 @@ class RoundtripTests(TestCase):
                 obj1 = reader(fasta_fp, qual=qual_fp)
 
                 # write
-                fasta_fh = StringIO()
-                qual_fh = StringIO()
+                fasta_fh = io.StringIO()
+                qual_fh = io.StringIO()
                 writer(obj1, fasta_fh, qual=qual_fh)
                 fasta_fh.seek(0)
                 qual_fh.seek(0)
@@ -845,13 +1045,7 @@ class RoundtripTests(TestCase):
                 fasta_fh.close()
                 qual_fh.close()
 
-                # TODO remove this custom equality testing code when
-                # SequenceCollection has an equals method (part of #656).
-                # We need this method to include IDs and description in the
-                # comparison (not part of SequenceCollection.__eq__).
                 self.assertEqual(obj1, obj2)
-                for s1, s2 in zip(obj1, obj2):
-                    self.assertTrue(s1.equals(s2))
 
     def test_roundtrip_biological_sequences(self):
         fps = list(map(lambda e: list(map(get_data_path, e)),
@@ -862,21 +1056,22 @@ class RoundtripTests(TestCase):
 
         for reader, writer in ((_fasta_to_biological_sequence,
                                 _biological_sequence_to_fasta),
-                               (_fasta_to_nucleotide_sequence,
-                                _nucleotide_sequence_to_fasta),
-                               (_fasta_to_dna_sequence,
+                               (partial(_fasta_to_dna_sequence,
+                                        validate=False),
                                 _dna_sequence_to_fasta),
-                               (_fasta_to_rna_sequence,
+                               (partial(_fasta_to_rna_sequence,
+                                        validate=False),
                                 _rna_sequence_to_fasta),
-                               (_fasta_to_protein_sequence,
+                               (partial(_fasta_to_protein_sequence,
+                                        validate=False),
                                 _protein_sequence_to_fasta)):
             for fasta_fp, qual_fp in fps:
                 # read
                 obj1 = reader(fasta_fp, qual=qual_fp)
 
                 # write
-                fasta_fh = StringIO()
-                qual_fh = StringIO()
+                fasta_fh = io.StringIO()
+                qual_fh = io.StringIO()
                 writer(obj1, fasta_fh, qual=qual_fh)
                 fasta_fh.seek(0)
                 qual_fh.seek(0)
@@ -886,7 +1081,7 @@ class RoundtripTests(TestCase):
                 fasta_fh.close()
                 qual_fh.close()
 
-                self.assertTrue(obj1.equals(obj2))
+                self.assertEqual(obj1, obj2)
 
 
 if __name__ == '__main__':
diff --git a/skbio/io/tests/test_fastq.py b/skbio/io/format/tests/test_fastq.py
similarity index 57%
rename from skbio/io/tests/test_fastq.py
rename to skbio/io/format/tests/test_fastq.py
index 9da5b6c..f575478 100644
--- a/skbio/io/tests/test_fastq.py
+++ b/skbio/io/format/tests/test_fastq.py
@@ -8,22 +8,25 @@
 
 from __future__ import absolute_import, division, print_function
 from future.builtins import zip
-from six import StringIO
+import six
 
+import io
 import unittest
 import warnings
+from functools import partial
 
-from skbio import (read, write, BiologicalSequence, NucleotideSequence,
-                   DNASequence, RNASequence, ProteinSequence,
+from skbio import (read, write, Sequence, DNA, RNA, Protein,
                    SequenceCollection, Alignment)
 from skbio.io import FASTQFormatError
-from skbio.io.fastq import (
+from skbio.io.format.fastq import (
     _fastq_sniffer, _fastq_to_generator, _fastq_to_sequence_collection,
     _fastq_to_alignment, _generator_to_fastq, _sequence_collection_to_fastq,
     _alignment_to_fastq)
 
 from skbio.util import get_data_path
 
+import numpy as np
+
 # Note: the example FASTQ files with file extension .fastq are taken from the
 # following open-access publication's supplementary data:
 #
@@ -49,6 +52,13 @@ class TestSniffer(unittest.TestCase):
     def setUp(self):
         self.positives = [get_data_path(e) for e in [
             'fastq_multi_seq_sanger',
+            'fastq_multi_blank_between_records',
+            'fastq_multi_ws_lines_between_records',
+            'fastq_multi_blank_end_of_file',
+            'fastq_multi_ws_lines_end_of_file',
+            'fastq_multi_whitespace_stripping',
+            'fastq_blank_lines',
+            'fastq_whitespace_only_lines',
             'fastq_single_seq_illumina1.3',
             'fastq_wrapping_as_illumina_no_description',
             'fastq_wrapping_as_sanger_no_description',
@@ -56,6 +66,8 @@ class TestSniffer(unittest.TestCase):
             'fastq_writer_illumina1.3_defaults',
             'fastq_writer_sanger_defaults',
             'fastq_writer_sanger_non_defaults',
+            'fastq_5_blanks_start_of_file',
+            'fastq_5_ws_lines_start_of_file',
             'illumina_full_range_as_illumina.fastq',
             'illumina_full_range_as_sanger.fastq',
             'illumina_full_range_original_illumina.fastq',
@@ -80,6 +92,18 @@ class TestSniffer(unittest.TestCase):
         self.negatives = [get_data_path(e) for e in [
             'empty',
             'whitespace_only',
+            'fastq_multi_blank_start_of_file',
+            'fastq_multi_ws_lines_start_of_file',
+            'fastq_invalid_blank_after_header',
+            'fastq_invalid_blank_after_seq',
+            'fastq_invalid_blank_after_plus',
+            'fastq_invalid_blank_within_seq',
+            'fastq_invalid_blank_within_qual',
+            'fastq_invalid_ws_line_after_header',
+            'fastq_invalid_ws_line_after_seq',
+            'fastq_invalid_ws_line_after_plus',
+            'fastq_invalid_ws_line_within_seq',
+            'fastq_invalid_ws_line_within_qual',
             'fastq_invalid_missing_header',
             'fastq_invalid_missing_seq_data',
             'error_diff_ids.fastq',
@@ -117,25 +141,38 @@ class TestSniffer(unittest.TestCase):
 
 class TestReaders(unittest.TestCase):
     def setUp(self):
-        self.valid_files = [
-            (get_data_path('empty'),
+        self.valid_configurations = [
+            ([get_data_path('empty'),
+              get_data_path('whitespace_only')],
              [{},
               {'variant': 'illumina1.8'},
-              {'phred_offset': 33, 'constructor': DNASequence}],
+              {'phred_offset': 33,
+               'constructor': DNA}],
              []),
 
-            (get_data_path('fastq_single_seq_illumina1.3'), [
+            ([get_data_path('fastq_single_seq_illumina1.3')], [
                 {'variant': 'illumina1.3'},
                 {'phred_offset': 64},
-                {'variant': 'illumina1.3', 'constructor': ProteinSequence},
+                {'variant': 'illumina1.3',
+                 'constructor': Protein},
             ], [
-                ('', 'bar\t baz', 'ACGT', [33, 34, 35, 36])
+                ('', 'bar\t baz', 'aCGT', [33, 34, 35, 36])
             ]),
 
-            (get_data_path('fastq_multi_seq_sanger'), [
+            ([get_data_path('fastq_multi_seq_sanger'),
+              get_data_path('fastq_whitespace_only_lines'),
+              get_data_path('fastq_blank_lines'),
+              get_data_path('fastq_multi_blank_between_records'),
+              get_data_path('fastq_multi_ws_lines_between_records'),
+              get_data_path('fastq_multi_blank_end_of_file'),
+              get_data_path('fastq_multi_ws_lines_end_of_file'),
+              get_data_path('fastq_multi_blank_start_of_file'),
+              get_data_path('fastq_multi_ws_lines_start_of_file'),
+              get_data_path('fastq_multi_whitespace_stripping')], [
                 {'variant': 'sanger'},
                 {'phred_offset': 33, 'seq_num': 2},
-                {'variant': 'sanger', 'constructor': RNASequence,
+                {'variant': 'sanger',
+                 'constructor': partial(RNA, validate=False),
                  'seq_num': 3},
             ], [
                 ('foo', 'bar baz', 'AACCGG',
@@ -145,10 +182,40 @@ class TestReaders(unittest.TestCase):
                 ('baz', 'foo bar', 'GATTTC',
                  [20, 21, 22, 23, 24, 18])
             ]),
+
+
         ]
 
         self.invalid_files = [(get_data_path(e[0]), e[1], e[2]) for e in [
-            ('whitespace_only', FASTQFormatError, 'blank line.*FASTQ'),
+            ('fastq_invalid_blank_after_header', FASTQFormatError,
+             'blank or whitespace-only line.*after header.*in FASTQ'),
+
+            ('fastq_invalid_blank_after_seq', FASTQFormatError,
+             "blank or whitespace-only line.*before '\+' in FASTQ"),
+
+            ('fastq_invalid_blank_after_plus', FASTQFormatError,
+             "blank or whitespace-only line.*after '\+'.*in FASTQ"),
+
+            ('fastq_invalid_blank_within_seq', FASTQFormatError,
+             'blank or whitespace-only line.*within sequence.*FASTQ'),
+
+            ('fastq_invalid_blank_within_qual', FASTQFormatError,
+             "blank or whitespace-only line.*within quality scores.*in FASTQ"),
+
+            ('fastq_invalid_ws_line_after_header', FASTQFormatError,
+             'blank or whitespace-only line.*after header.*in FASTQ'),
+
+            ('fastq_invalid_ws_line_after_seq', FASTQFormatError,
+             "blank or whitespace-only line.*before '\+' in FASTQ"),
+
+            ('fastq_invalid_ws_line_after_plus', FASTQFormatError,
+             "blank or whitespace-only line.*after '\+'.*in FASTQ"),
+
+            ('fastq_invalid_ws_line_within_seq', FASTQFormatError,
+             'blank or whitespace-only line.*within sequence.*FASTQ'),
+
+            ('fastq_invalid_ws_line_within_qual', FASTQFormatError,
+             "blank or whitespace-only line.*within quality scores.*in FASTQ"),
 
             ('fastq_invalid_missing_header', FASTQFormatError,
              "sequence.*header.*start of file: 'seq1 desc1'"),
@@ -170,7 +237,7 @@ class TestReaders(unittest.TestCase):
             ('error_long_qual.fastq', FASTQFormatError, "Extra quality.*'Y'"),
 
             ('error_no_qual.fastq', FASTQFormatError,
-             'blank line.*FASTQ'),
+             "blank or whitespace-only line.*after '\+'.*in FASTQ"),
 
             ('error_qual_del.fastq', ValueError,
              'Decoded Phred score.*out of range'),
@@ -203,10 +270,10 @@ class TestReaders(unittest.TestCase):
              r"whitespace.*sequence data: 'GATGTGCAA\\tTACCTTTGTA\\tGAGGAA'"),
 
             ('error_trunc_at_seq.fastq', FASTQFormatError,
-             'blank line.*FASTQ'),
+             'incomplete/truncated.*FASTQ'),
 
             ('error_trunc_at_plus.fastq', FASTQFormatError,
-             'blank line.*FASTQ'),
+             'incomplete/truncated.*FASTQ'),
 
             ('error_trunc_at_qual.fastq', FASTQFormatError,
              'incomplete/truncated.*end of file'),
@@ -226,28 +293,43 @@ class TestReaders(unittest.TestCase):
         ]]
 
     def test_fastq_to_generator_valid_files(self):
-        for valid, kwargs, components in self.valid_files:
-            for kwarg in kwargs:
-                _drop_kwargs(kwarg, 'seq_num')
-                constructor = kwarg.get('constructor', BiologicalSequence)
-                expected = [constructor(c[2], id=c[0], description=c[1],
-                            quality=c[3]) for c in components]
-
-                observed = list(_fastq_to_generator(valid, **kwarg))
-                self.assertEqual(len(expected), len(observed))
-                for o, e in zip(observed, expected):
-                    self.assertTrue(o.equals(e))
+        for valid_files, kwargs, components in self.valid_configurations:
+            for valid in valid_files:
+                for observed_kwargs in kwargs:
+                    _drop_kwargs(observed_kwargs, 'seq_num')
+                    constructor = observed_kwargs.get('constructor', Sequence)
+
+                    # Can't use partials for this because the read
+                    # function below can't operate on partials
+                    expected_kwargs = {}
+                    if hasattr(constructor, 'lowercase'):
+                        expected_kwargs['lowercase'] = 'introns'
+                        observed_kwargs['lowercase'] = 'introns'
+
+                    expected = [constructor(c[2],
+                                            metadata={'id': c[0],
+                                                      'description': c[1]},
+                                positional_metadata={'quality': np.array(c[3],
+                                                     dtype=np.uint8)},
+                                **expected_kwargs)
+                                for c in components]
+
+                    observed = list(_fastq_to_generator(valid,
+                                                        **observed_kwargs))
+                    self.assertEqual(len(expected), len(observed))
+                    for o, e in zip(observed, expected):
+                        self.assertEqual(o, e)
 
     def test_fastq_to_generator_invalid_files_all_variants(self):
         # files that should be invalid for all variants, as well as custom
         # phred offsets
         for fp, error_type, error_msg_regex in self.invalid_files:
             for variant in 'sanger', 'illumina1.3', 'illumina1.8':
-                with self.assertRaisesRegexp(error_type, error_msg_regex):
+                with six.assertRaisesRegex(self, error_type, error_msg_regex):
                     list(_fastq_to_generator(fp, variant=variant))
 
             for offset in 33, 64, 40, 77:
-                with self.assertRaisesRegexp(error_type, error_msg_regex):
+                with six.assertRaisesRegex(self, error_type, error_msg_regex):
                     list(_fastq_to_generator(fp, phred_offset=offset))
 
     def test_fastq_to_generator_invalid_files_illumina(self):
@@ -257,9 +339,11 @@ class TestReaders(unittest.TestCase):
                'solexa_full_range_original_solexa.fastq']]
 
         for fp in fps:
-            with self.assertRaisesRegexp(ValueError, 'out of range \[0, 62\]'):
+            with six.assertRaisesRegex(self, ValueError,
+                                       'out of range \[0, 62\]'):
                 list(_fastq_to_generator(fp, variant='illumina1.3'))
-            with self.assertRaisesRegexp(ValueError, 'out of range \[0, 62\]'):
+            with six.assertRaisesRegex(self, ValueError,
+                                       'out of range \[0, 62\]'):
                 list(_fastq_to_generator(fp, variant='illumina1.8'))
 
     def test_fastq_to_generator_solexa(self):
@@ -271,66 +355,110 @@ class TestReaders(unittest.TestCase):
                 variant='solexa'))
 
     def test_fastq_to_sequence(self):
-        for constructor in [BiologicalSequence, NucleotideSequence,
-                            DNASequence, RNASequence, ProteinSequence]:
-            for valid, kwargs, components in self.valid_files:
-                # skip empty file case since we cannot read a specific sequence
-                # from an empty file
-                if len(components) == 0:
-                    continue
-
-                for kwarg in kwargs:
-                    _drop_kwargs(kwarg, 'constructor')
-
-                    seq_num = kwarg.get('seq_num', 1)
-                    c = components[seq_num - 1]
-                    expected = constructor(c[2], id=c[0], description=c[1],
-                                           quality=c[3])
-
-                    observed = read(valid, into=constructor, format='fastq',
-                                    verify=False, **kwarg)
-                    self.assertTrue(observed.equals(expected))
+        for constructor in [Sequence, DNA, RNA, Protein]:
+            for valid_files, kwargs, components in self.valid_configurations:
+                for valid in valid_files:
+                    # skip empty file case since we cannot read a specific
+                    # sequencefrom an empty file
+                    if len(components) == 0:
+                        continue
+
+                    for observed_kwargs in kwargs:
+                        expected_kwargs = {}
+
+                        # TODO:
+                        # some of the test files contain characters which are
+                        # invalid for RNA, so don't validate for now. Need to
+                        # fix this
+                        if constructor is RNA:
+                            observed_kwargs['validate'] = False
+                            expected_kwargs['validate'] = False
+
+                        _drop_kwargs(observed_kwargs, 'constructor')
+
+                        # Can't use partials for this because the read
+                        # function below can't operate on partials
+                        if hasattr(constructor, 'lowercase'):
+                            expected_kwargs['lowercase'] = 'introns'
+                            observed_kwargs['lowercase'] = 'introns'
+
+                        seq_num = observed_kwargs.get('seq_num', 1)
+                        c = components[seq_num - 1]
+                        expected = \
+                            constructor(
+                                c[2], metadata={'id': c[0],
+                                                'description': c[1]},
+                                positional_metadata={'quality': np.array(c[3],
+                                                     dtype=np.uint8)},
+                                **expected_kwargs)
+
+                        observed = read(valid, into=constructor,
+                                        format='fastq', verify=False,
+                                        **observed_kwargs)
+                        self.assertEqual(observed, expected)
 
     def test_fastq_to_sequence_collection(self):
-        for valid, kwargs, components in self.valid_files:
-            for kwarg in kwargs:
-                _drop_kwargs(kwarg, 'seq_num')
-                constructor = kwarg.get('constructor', BiologicalSequence)
-                expected = SequenceCollection(
-                    [constructor(c[2], id=c[0], description=c[1], quality=c[3])
-                     for c in components])
-
-                observed = _fastq_to_sequence_collection(valid, **kwarg)
-                # TODO remove when #656 is resolved
-                self.assertEqual(observed, expected)
-                for o, e in zip(observed, expected):
-                    self.assertTrue(o.equals(e))
+        for valid_files, kwargs, components in self.valid_configurations:
+            for valid in valid_files:
+                for observed_kwargs in kwargs:
+                    _drop_kwargs(observed_kwargs, 'seq_num')
+                    constructor = observed_kwargs.get('constructor', Sequence)
+
+                    # Can't use partials for this because the read
+                    # function below can't operate on partials
+                    expected_kwargs = {}
+                    if hasattr(constructor, 'lowercase'):
+                        expected_kwargs['lowercase'] = 'introns'
+                        observed_kwargs['lowercase'] = 'introns'
+
+                    expected = SequenceCollection(
+                        [constructor(
+                            c[2], metadata={'id': c[0], 'description': c[1]},
+                            positional_metadata={'quality': np.array(c[3],
+                                                 np.uint8)},
+                            **expected_kwargs)
+                         for c in components])
+
+                    observed = _fastq_to_sequence_collection(valid,
+                                                             **observed_kwargs)
+                    self.assertEqual(observed, expected)
 
     def test_fastq_to_alignment(self):
-        for valid, kwargs, components in self.valid_files:
-            for kwarg in kwargs:
-                _drop_kwargs(kwarg, 'seq_num')
-                constructor = kwarg.get('constructor', BiologicalSequence)
-                expected = Alignment(
-                    [constructor(c[2], id=c[0], description=c[1], quality=c[3])
-                     for c in components])
-
-                observed = _fastq_to_alignment(valid, **kwarg)
-                # TODO remove when #656 is resolved
-                self.assertEqual(observed, expected)
-                for o, e in zip(observed, expected):
-                    self.assertTrue(o.equals(e))
+        for valid_files, kwargs, components in self.valid_configurations:
+            for valid in valid_files:
+                for observed_kwargs in kwargs:
+                    _drop_kwargs(observed_kwargs, 'seq_num')
+                    constructor = observed_kwargs.get('constructor', Sequence)
+
+                    # Can't use partials for this because the read
+                    # function below can't operate on partials
+                    expected_kwargs = {}
+                    if hasattr(constructor, 'lowercase'):
+                        expected_kwargs['lowercase'] = 'introns'
+                        observed_kwargs['lowercase'] = 'introns'
+
+                    expected = Alignment(
+                        [constructor(
+                            c[2], metadata={'id': c[0],
+                                            'description': c[1]},
+                            positional_metadata={'quality': np.array(c[3],
+                                                 dtype=np.uint8)},
+                            **expected_kwargs)
+                         for c in components])
+
+                    observed = _fastq_to_alignment(valid, **observed_kwargs)
+                    self.assertEqual(observed, expected)
 
 
 class TestWriters(unittest.TestCase):
     def setUp(self):
         self.valid_files = [
             ([
-                ('f o  o', 'bar\n\nbaz', 'AACCGG',
+                ('f o  o', 'bar\n\nbaz', 'AaCcGg',
                  [16, 17, 18, 19, 20, 21]),
-                ('bar', 'baz foo', 'TTGGCC',
+                ('bar', 'baz foo', 'TtGgCc',
                  [23, 22, 21, 20, 19, 18]),
-                ('ba\n\t\tz', 'foo bar', 'GATTTC',
+                ('ba\n\t\tz', 'foo bar', 'gAtTtC',
                  [20, 21, 22, 23, 24, 18])
             ], [
                 ({'variant': 'sanger'},
@@ -352,34 +480,52 @@ class TestWriters(unittest.TestCase):
             for kwargs, expected_fp in kwargs_expected_fp:
                 def gen():
                     for c in components:
-                        yield BiologicalSequence(
-                            c[2], id=c[0], description=c[1], quality=c[3])
+                        yield Sequence(
+                            c[2], metadata={'id': c[0], 'description': c[1]},
+                            positional_metadata={'quality': c[3]})
 
-                fh = StringIO()
+                fh = io.StringIO()
                 _generator_to_fastq(gen(), fh, **kwargs)
                 observed = fh.getvalue()
                 fh.close()
 
-                with open(expected_fp, 'U') as f:
+                with io.open(expected_fp) as f:
                     expected = f.read()
 
                 self.assertEqual(observed, expected)
 
     def test_sequence_to_fastq_kwargs_passed(self):
-        for constructor in [BiologicalSequence, NucleotideSequence,
-                            DNASequence, RNASequence, ProteinSequence]:
+        for constructor in [Sequence, DNA, RNA, Protein]:
             for components, kwargs_expected_fp in self.valid_files:
-                for kwargs, expected_fp in kwargs_expected_fp:
-                    fh = StringIO()
+                for expected_kwargs, expected_fp in kwargs_expected_fp:
+
+                    observed_kwargs = {}
+                    # TODO:
+                    # some of the test files contain characters which are
+                    # invalid for RNA, so don't validate for now. Need to
+                    # fix this
+                    if constructor is RNA:
+                        observed_kwargs['validate'] = False
+
+                    # Can't use partials for this because the read
+                    # function below can't operate on partials
+                    if hasattr(constructor, 'lowercase'):
+                        expected_kwargs['lowercase'] = 'introns'
+                        observed_kwargs['lowercase'] = 'introns'
+
+                    fh = io.StringIO()
                     for c in components:
-                        obj = constructor(c[2], id=c[0], description=c[1],
-                                          quality=c[3])
-                        write(obj, into=fh, format='fastq', **kwargs)
+                        obj = constructor(
+                            c[2],
+                            metadata={'id': c[0], 'description': c[1]},
+                            positional_metadata={'quality': c[3]},
+                            **observed_kwargs)
+                        write(obj, into=fh, format='fastq', **expected_kwargs)
 
                     observed = fh.getvalue()
                     fh.close()
 
-                    with open(expected_fp, 'U') as f:
+                    with io.open(expected_fp) as f:
                         expected = f.read()
 
                     self.assertEqual(observed, expected)
@@ -388,15 +534,18 @@ class TestWriters(unittest.TestCase):
         for components, kwargs_expected_fp in self.valid_files:
             for kwargs, expected_fp in kwargs_expected_fp:
                 obj = SequenceCollection([
-                    NucleotideSequence(c[2], id=c[0], description=c[1],
-                                       quality=c[3]) for c in components])
+                    DNA(c[2], metadata={'id': c[0], 'description': c[1]},
+                        positional_metadata={'quality': c[3]},
+                        lowercase='introns')
+                    for c in components])
 
-                fh = StringIO()
+                fh = io.StringIO()
+                kwargs['lowercase'] = 'introns'
                 _sequence_collection_to_fastq(obj, fh, **kwargs)
                 observed = fh.getvalue()
                 fh.close()
 
-                with open(expected_fp, 'U') as f:
+                with io.open(expected_fp) as f:
                     expected = f.read()
 
                 self.assertEqual(observed, expected)
@@ -405,27 +554,31 @@ class TestWriters(unittest.TestCase):
         for components, kwargs_expected_fp in self.valid_files:
             for kwargs, expected_fp in kwargs_expected_fp:
                 obj = Alignment([
-                    ProteinSequence(c[2], id=c[0], description=c[1],
-                                    quality=c[3]) for c in components])
+                    Protein(c[2], metadata={'id': c[0], 'description': c[1]},
+                            positional_metadata={'quality': c[3]},
+                            lowercase='introns')
+                    for c in components])
 
-                fh = StringIO()
+                fh = io.StringIO()
+                kwargs['lowercase'] = 'introns'
                 _alignment_to_fastq(obj, fh, **kwargs)
                 observed = fh.getvalue()
                 fh.close()
 
-                with open(expected_fp, 'U') as f:
+                with io.open(expected_fp) as f:
                     expected = f.read()
 
                 self.assertEqual(observed, expected)
 
     def test_generator_to_fastq_no_qual(self):
         def gen():
-            yield BiologicalSequence('ACGT', id='foo', description='bar',
-                                     quality=range(4))
-            yield BiologicalSequence('ACG', id='foo', description='bar')
+            yield Sequence('ACGT',
+                           metadata={'id': 'foo', 'description': 'bar'},
+                           positional_metadata={'quality': range(4)})
+            yield Sequence('ACG', metadata={'id': 'foo', 'description': 'bar'})
 
-        with self.assertRaisesRegexp(ValueError, '2nd.*quality scores'):
-            _generator_to_fastq(gen(), StringIO(), variant='illumina1.8')
+        with six.assertRaisesRegex(self, ValueError, '2nd.*quality scores'):
+            _generator_to_fastq(gen(), io.StringIO(), variant='illumina1.8')
 
 
 class TestConversions(unittest.TestCase):
@@ -532,7 +685,7 @@ class TestConversions(unittest.TestCase):
         for from_fp, to_fp, kwargs in self.conversions:
             for from_kwargs, to_kwargs in kwargs:
                 read_gen = _fastq_to_generator(from_fp, **from_kwargs)
-                fh = StringIO()
+                fh = io.StringIO()
 
                 # will issue warning when truncating quality scores
                 with warnings.catch_warnings(record=True):
@@ -542,7 +695,7 @@ class TestConversions(unittest.TestCase):
                 obs = fh.getvalue()
                 fh.close()
 
-                with open(to_fp, 'U') as fh:
+                with io.open(to_fp) as fh:
                     exp = fh.read()
                 self.assertEqual(obs, exp)
 
diff --git a/skbio/io/tests/test_lsmat.py b/skbio/io/format/tests/test_lsmat.py
similarity index 97%
rename from skbio/io/tests/test_lsmat.py
rename to skbio/io/format/tests/test_lsmat.py
index f66f06e..44fab52 100644
--- a/skbio/io/tests/test_lsmat.py
+++ b/skbio/io/format/tests/test_lsmat.py
@@ -7,13 +7,14 @@
 # ----------------------------------------------------------------------------
 
 from __future__ import absolute_import, division, print_function
-from six import StringIO
+import six
 
 from unittest import TestCase, main
 
+from skbio.io._fileobject import StringIO
 from skbio import DistanceMatrix
 from skbio.io import LSMatFormatError
-from skbio.io.lsmat import (
+from skbio.io.format.lsmat import (
     _lsmat_to_dissimilarity_matrix, _lsmat_to_distance_matrix,
     _dissimilarity_matrix_to_lsmat, _distance_matrix_to_lsmat, _lsmat_sniffer)
 from skbio.stats.distance import DissimilarityMatrix, DistanceMatrixError
@@ -120,8 +121,8 @@ class DissimilarityAndDistanceMatrixReaderWriterTests(LSMatTestData):
     def test_read_invalid_files(self):
         for fn in _lsmat_to_dissimilarity_matrix, _lsmat_to_distance_matrix:
             for invalid_fh, error_msg_regexp in self.invalid_fhs:
-                with self.assertRaisesRegexp(LSMatFormatError,
-                                             error_msg_regexp):
+                with six.assertRaisesRegex(self, LSMatFormatError,
+                                           error_msg_regexp):
                     invalid_fh.seek(0)
                     fn(invalid_fh)
 
diff --git a/skbio/io/tests/test_newick.py b/skbio/io/format/tests/test_newick.py
similarity index 98%
rename from skbio/io/tests/test_newick.py
rename to skbio/io/format/tests/test_newick.py
index 5d549c3..1e3bd14 100644
--- a/skbio/io/tests/test_newick.py
+++ b/skbio/io/format/tests/test_newick.py
@@ -7,14 +7,14 @@
 # ----------------------------------------------------------------------------
 
 from __future__ import absolute_import, division, print_function
-from six import StringIO
 
 import unittest
 
 from skbio import TreeNode
 from skbio.io import NewickFormatError
-from skbio.io.newick import (_newick_to_tree_node, _tree_node_to_newick,
-                             _newick_sniffer)
+from skbio.io.format.newick import (
+    _newick_to_tree_node, _tree_node_to_newick, _newick_sniffer)
+from skbio.io._fileobject import StringIO
 
 
 class TestNewick(unittest.TestCase):
@@ -349,7 +349,7 @@ class TestNewick(unittest.TestCase):
         tree = _newick_to_tree_node(fh, convert_underscores=False)
         fh2 = StringIO()
         _tree_node_to_newick(tree, fh2)
-        self.assertEquals(fh2.getvalue(), "('_':0.1,'_a','_b')'__';\n")
+        self.assertEqual(fh2.getvalue(), "('_':0.1,'_a','_b')'__';\n")
         fh2.close()
         fh.close()
 
diff --git a/skbio/io/tests/test_ordination.py b/skbio/io/format/tests/test_ordination.py
similarity index 97%
rename from skbio/io/tests/test_ordination.py
rename to skbio/io/format/tests/test_ordination.py
index 5075cbc..327768b 100644
--- a/skbio/io/tests/test_ordination.py
+++ b/skbio/io/format/tests/test_ordination.py
@@ -7,15 +7,16 @@
 # ----------------------------------------------------------------------------
 
 from __future__ import absolute_import, division, print_function
-from six import StringIO
+import six
 
+import io
 from unittest import TestCase, main
 
 import numpy as np
 import numpy.testing as npt
 
 from skbio.io import OrdinationFormatError
-from skbio.io.ordination import (
+from skbio.io.format.ordination import (
     _ordination_to_ordination_results, _ordination_results_to_ordination,
     _ordination_sniffer)
 from skbio.stats.ordination import (
@@ -176,18 +177,18 @@ class OrdinationResultsReaderWriterTests(OrdinationTestData):
 
     def test_read_invalid_files(self):
         for invalid_fp, error_msg_regexp, _ in self.invalid_fps:
-            with self.assertRaisesRegexp(OrdinationFormatError,
-                                         error_msg_regexp):
+            with six.assertRaisesRegex(self, OrdinationFormatError,
+                                       error_msg_regexp):
                 _ordination_to_ordination_results(invalid_fp)
 
     def test_write(self):
         for fp, obj in zip(self.valid_fps, self.ordination_results_objs):
-            fh = StringIO()
+            fh = io.StringIO()
             _ordination_results_to_ordination(obj, fh)
             obs = fh.getvalue()
             fh.close()
 
-            with open(fp, 'U') as fh:
+            with io.open(fp) as fh:
                 exp = fh.read()
 
             npt.assert_equal(obs, exp)
@@ -198,7 +199,7 @@ class OrdinationResultsReaderWriterTests(OrdinationTestData):
             obj1 = _ordination_to_ordination_results(fp)
 
             # Write.
-            fh = StringIO()
+            fh = io.StringIO()
             _ordination_results_to_ordination(obj1, fh)
             fh.seek(0)
 
diff --git a/skbio/io/tests/test_phylip.py b/skbio/io/format/tests/test_phylip.py
similarity index 66%
rename from skbio/io/tests/test_phylip.py
rename to skbio/io/format/tests/test_phylip.py
index 3cf9ec0..3e07f60 100644
--- a/skbio/io/tests/test_phylip.py
+++ b/skbio/io/format/tests/test_phylip.py
@@ -7,12 +7,13 @@
 # ----------------------------------------------------------------------------
 
 from __future__ import absolute_import, division, print_function
-from six import StringIO
+import six
 
+import io
 from unittest import TestCase, main
 
 from skbio.io import PhylipFormatError
-from skbio.io.phylip import _alignment_to_phylip
+from skbio.io.format.phylip import _alignment_to_phylip
 from skbio import Alignment, DNA, RNA
 from skbio.util import get_data_path
 
@@ -21,33 +22,33 @@ class AlignmentWriterTests(TestCase):
     def setUp(self):
         # ids all same length, seqs longer than 10 chars
         dna_3_seqs = Alignment([
-            DNA('..ACC-GTTGG..', id="d1"),
-            DNA('TTACCGGT-GGCC', id="d2"),
-            DNA('.-ACC-GTTGC--', id="d3")])
+            DNA('..ACC-GTTGG..', metadata={'id': "d1"}),
+            DNA('TTACCGGT-GGCC', metadata={'id': "d2"}),
+            DNA('.-ACC-GTTGC--', metadata={'id': "d3"})])
 
         # id lengths from 0 to 10, with mixes of numbers, characters, and
         # spaces. sequence characters are a mix of cases and gap characters.
         # sequences are shorter than 10 chars
         variable_length_ids = Alignment([
-            RNA('.-ACGU'),
-            RNA('UGCA-.', id='a'),
-            RNA('.ACGU-', id='bb'),
-            RNA('ugca-.', id='1'),
-            RNA('AaAaAa', id='abcdefghij'),
-            RNA('GGGGGG', id='ab def42ij')])
+            RNA('.-ACGU', metadata={'id': ''}),
+            RNA('UGCA-.', metadata={'id': 'a'}),
+            RNA('.ACGU-', metadata={'id': 'bb'}),
+            RNA('ugca-.', metadata={'id': '1'}, validate=False),
+            RNA('AaAaAa', metadata={'id': 'abcdefghij'}, validate=False),
+            RNA('GGGGGG', metadata={'id': 'ab def42ij'})])
 
         # sequences with 20 chars = exactly two chunks of size 10
         two_chunks = Alignment([
-            DNA('..ACC-GTTGG..AATGC.C', id='foo'),
-            DNA('TTACCGGT-GGCCTA-GCAT', id='bar')])
+            DNA('..ACC-GTTGG..AATGC.C', metadata={'id': 'foo'}),
+            DNA('TTACCGGT-GGCCTA-GCAT', metadata={'id': 'bar'})])
 
         # single sequence with more than two chunks
         single_seq_long = Alignment([
-            DNA('..ACC-GTTGG..AATGC.C----', id='foo')])
+            DNA('..ACC-GTTGG..AATGC.C----', metadata={'id': 'foo'})])
 
         # single sequence with only a single character (minimal writeable
         # alignment)
-        single_seq_short = Alignment([DNA('-')])
+        single_seq_short = Alignment([DNA('-', metadata={'id': ''})])
 
         # alignments that can be written in phylip format
         self.objs = [dna_3_seqs, variable_length_ids, two_chunks,
@@ -64,31 +65,32 @@ class AlignmentWriterTests(TestCase):
             (Alignment([]), 'one sequence'),
 
             # no positions
-            (Alignment([DNA('', id="d1"),
-                        DNA('', id="d2")]), 'one position'),
+            (Alignment([DNA('', metadata={'id': "d1"}),
+                        DNA('', metadata={'id': "d2"})]), 'one position'),
 
             # ids too long
-            (Alignment([RNA('ACGU', id="foo"),
-                        RNA('UGCA', id="alongsequenceid")]),
+            (Alignment([RNA('ACGU', metadata={'id': "foo"}),
+                        RNA('UGCA', metadata={'id': "alongsequenceid"})]),
              '10.*alongsequenceid')
         ]
 
     def test_write(self):
         for fp, obj in zip(self.fps, self.objs):
-            fh = StringIO()
+            fh = io.StringIO()
             _alignment_to_phylip(obj, fh)
             obs = fh.getvalue()
             fh.close()
 
-            with open(fp, 'U') as fh:
+            with io.open(fp) as fh:
                 exp = fh.read()
 
             self.assertEqual(obs, exp)
 
     def test_write_invalid_alignment(self):
         for invalid_obj, error_msg_regexp in self.invalid_objs:
-            fh = StringIO()
-            with self.assertRaisesRegexp(PhylipFormatError, error_msg_regexp):
+            fh = io.StringIO()
+            with six.assertRaisesRegex(self, PhylipFormatError,
+                                       error_msg_regexp):
                 _alignment_to_phylip(invalid_obj, fh)
 
             # ensure nothing was written to the file before the error was
diff --git a/skbio/io/tests/test_qseq.py b/skbio/io/format/tests/test_qseq.py
similarity index 50%
rename from skbio/io/tests/test_qseq.py
rename to skbio/io/format/tests/test_qseq.py
index d98a82f..74cb7d0 100644
--- a/skbio/io/tests/test_qseq.py
+++ b/skbio/io/format/tests/test_qseq.py
@@ -5,19 +5,20 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
+
 from __future__ import absolute_import, division, print_function
 
 from future.builtins import zip
 
 import unittest
-from skbio import (SequenceCollection, BiologicalSequence, NucleotideSequence,
-                   DNASequence, RNASequence, ProteinSequence)
 
+from skbio import SequenceCollection, Sequence, DNA, RNA, Protein
 from skbio import read
 from skbio.util import get_data_path
 from skbio.io import QSeqFormatError
-from skbio.io.qseq import (_qseq_to_generator,
-                           _qseq_to_sequence_collection, _qseq_sniffer)
+from skbio.io.format.qseq import (
+    _qseq_to_generator, _qseq_to_sequence_collection, _qseq_sniffer)
+import numpy as np
 
 
 def _drop_kwargs(kwargs, *args):
@@ -33,45 +34,108 @@ class TestQSeqBase(unittest.TestCase):
                 {'variant': 'sanger'},
                 {'phred_offset': 33},
             ], [
-                ('sanger_1:3:34:-30:30#0/2',
-                 'ACGTACGTACGTACGTACGTACGTACTTTTTTTTTTACGTACGTACGTACGT'
-                 'ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC', [
-                     26, 26, 29, 31, 33, 34, 36, 37, 38, 39, 41, 42,
-                     43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
-                     55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
-                     67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
-                     79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
-                     91, 92, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93,
-                     93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93,
-                     93, 93, 93, 93, 93, 93, 93, 93, 93, 93])
+                {'id': 'sanger_1:3:34:-30:30#0/2',
+                 'sequence': 'ACGTACGTACGTACGTACGTACGTACTTTTTTTTTTACGTACGTACG'
+                             'TACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC',
+                 'quality': [26, 26, 29, 31, 33, 34, 36, 37, 38, 39, 41, 42,
+                             43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+                             55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
+                             67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+                             79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
+                             91, 92, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93,
+                             93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93,
+                             93, 93, 93, 93, 93, 93, 93, 93, 93, 93],
+                 'machine_name': 'sanger',
+                 'run_number': 1,
+                 'lane_number': 3,
+                 'tile_number': 34,
+                 'x': -30,
+                 'y': 30,
+                 'index': 0,
+                 'read_number': 2}
             ]),
 
             (get_data_path('qseq_multi_seq_illumina1.3'), [
                 {'variant': 'illumina1.3'},
                 {'phred_offset': 64}
             ], [
-                ('illumina_1:3:34:-30:30#0/1', 'ACG....ACGTAC', [
-                    50, 53, 2, 2, 2, 2, 50, 2, 3, 5, 6, 7, 8]),
-                ('illumina_1:3:35:-30:30#0/2', 'ACGTA.AATAAAC', [
-                    39, 37, 20, 33, 1, 33, 38, 40, 55, 49, 1, 1, 38])
+                {'id': 'illumina_1:3:34:-30:30#0/1',
+                 'sequence': 'ACG....ACGTAC',
+                 'quality': [50, 53, 2, 2, 2, 2, 50, 2, 3, 5, 6, 7, 8],
+                 'machine_name': 'illumina',
+                 'run_number': 1,
+                 'lane_number': 3,
+                 'tile_number': 34,
+                 'x': -30,
+                 'y': 30,
+                 'index': 0,
+                 'read_number': 1},
+                {'id': 'illumina_1:3:35:-30:30#0/2',
+                 'sequence': 'ACGTA.AATAAAC',
+                 'quality': [39, 37, 20, 33, 1, 33, 38, 40, 55, 49, 1, 1, 38],
+                 'machine_name': 'illumina',
+                 'run_number': 1,
+                 'lane_number': 3,
+                 'tile_number': 35,
+                 'x': -30,
+                 'y': 30,
+                 'index': 0,
+                 'read_number': 2}
             ]),
 
             (get_data_path('qseq_multi_seq_illumina1.3'), [
                 {'variant': 'illumina1.3', 'filter': False, 'seq_num': 1},
                 {'phred_offset': 64, 'filter': False, 'seq_num': 2},
                 {'variant': 'illumina1.3', 'filter': False, 'seq_num': 3,
-                 'constructor': ProteinSequence},
+                 'constructor': Protein},
                 {'phred_offset': 64, 'filter': False, 'seq_num': 4,
-                 'constructor': DNASequence},
+                 'constructor': DNA},
             ], [
-                ('illumina_1:3:34:-30:30#0/1', 'ACG....ACGTAC', [
-                    50, 53, 2, 2, 2, 2, 50, 2, 3, 5, 6, 7, 8]),
-                ('illumina_1:3:34:30:-30#0/1', 'CGGGCATTGCA', [
-                    3, 7, 7, 7, 3, 33, 51, 36, 7, 3, 1]),
-                ('illumina_1:3:35:-30:30#0/2', 'ACGTA.AATAAAC', [
-                    39, 37, 20, 33, 1, 33, 38, 40, 55, 49, 1, 1, 38]),
-                ('illumina_1:3:35:30:-30#0/3', 'CATTTAGGA.TGCA', [
-                    52, 42, 38, 44, 43, 1, 6, 46, 43, 11, 39, 40, 54, 13])
+                {'id': 'illumina_1:3:34:-30:30#0/1',
+                 'sequence': 'ACG....ACGTAC',
+                 'quality': [50, 53, 2, 2, 2, 2, 50, 2, 3, 5, 6, 7, 8],
+                 'machine_name': 'illumina',
+                 'run_number': 1,
+                 'lane_number': 3,
+                 'tile_number': 34,
+                 'x': -30,
+                 'y': 30,
+                 'index': 0,
+                 'read_number': 1},
+                {'id': 'illumina_1:3:34:30:-30#0/1',
+                 'sequence': 'CGGGCATTGCA',
+                 'quality': [3, 7, 7, 7, 3, 33, 51, 36, 7, 3, 1],
+                 'machine_name': 'illumina',
+                 'run_number': 1,
+                 'lane_number': 3,
+                 'tile_number': 34,
+                 'x': 30,
+                 'y': -30,
+                 'index': 0,
+                 'read_number': 1},
+                {'id': 'illumina_1:3:35:-30:30#0/2',
+                 'sequence': 'ACGTA.AATAAAC',
+                 'quality': [39, 37, 20, 33, 1, 33, 38, 40, 55, 49, 1, 1, 38],
+                 'machine_name': 'illumina',
+                 'run_number': 1,
+                 'lane_number': 3,
+                 'tile_number': 35,
+                 'x': -30,
+                 'y': 30,
+                 'index': 0,
+                 'read_number': 2},
+                {'id': 'illumina_1:3:35:30:-30#0/3',
+                 'sequence': 'CATTTAGGA.TGCA',
+                 'quality': [52, 42, 38, 44, 43, 1, 6, 46, 43, 11, 39, 40, 54,
+                             13],
+                 'machine_name': 'illumina',
+                 'run_number': 1,
+                 'lane_number': 3,
+                 'tile_number': 35,
+                 'x': 30,
+                 'y': -30,
+                 'index': 0,
+                 'read_number': 3}
             ])
         ]
 
@@ -201,14 +265,27 @@ class TestQSeqToGenerator(TestQSeqBase):
         for valid, kwargs, components in self.valid_files:
             for kwarg in kwargs:
                 _drop_kwargs(kwarg, 'seq_num')
-                constructor = kwarg.get('constructor', BiologicalSequence)
-                expected = [constructor(c[1], id=c[0], quality=c[2]) for
-                            c in components]
+                constructor = kwarg.get('constructor', Sequence)
+                expected = [
+                    constructor(
+                        c['sequence'],
+                        metadata={'id': c['id'],
+                                  'machine_name': c['machine_name'],
+                                  'run_number': c['run_number'],
+                                  'lane_number': c['lane_number'],
+                                  'tile_number': c['tile_number'],
+                                  'x': c['x'],
+                                  'y': c['y'],
+                                  'index': c['index'],
+                                  'read_number': c['read_number']},
+                        positional_metadata={
+                            'quality': np.array(c['quality'], dtype=np.uint8)})
+                    for c in components]
 
                 observed = list(_qseq_to_generator(valid, **kwarg))
                 self.assertEqual(len(expected), len(observed))
                 for o, e in zip(observed, expected):
-                    self.assertTrue(o.equals(e))
+                    self.assertEqual(o, e)
 
 
 class TestQSeqToSequenceCollection(TestQSeqBase):
@@ -232,22 +309,30 @@ class TestQSeqToSequenceCollection(TestQSeqBase):
         for valid, kwargs, components in self.valid_files:
             for kwarg in kwargs:
                 _drop_kwargs(kwarg, 'seq_num')
-                constructor = kwarg.get('constructor', BiologicalSequence)
-                expected = SequenceCollection([constructor(c[1], id=c[0],
-                                               quality=c[2]) for c in
-                                               components])
+                constructor = kwarg.get('constructor', Sequence)
+                expected = SequenceCollection([
+                    constructor(
+                        c['sequence'],
+                        metadata={'id': c['id'],
+                                  'machine_name': c['machine_name'],
+                                  'run_number': c['run_number'],
+                                  'lane_number': c['lane_number'],
+                                  'tile_number': c['tile_number'],
+                                  'x': c['x'],
+                                  'y': c['y'],
+                                  'index': c['index'],
+                                  'read_number': c['read_number']},
+                        positional_metadata={
+                            'quality': np.array(c['quality'], dtype=np.uint8)})
+                    for c in components])
 
                 observed = _qseq_to_sequence_collection(valid, **kwarg)
-                # TODO remove when #656 is resolved
                 self.assertEqual(observed, expected)
-                for o, e in zip(observed, expected):
-                    self.assertTrue(o.equals(e))
 
 
 class TestQSeqToSequences(TestQSeqBase):
     def test_invalid_files(self):
-        for constructor in [BiologicalSequence, NucleotideSequence,
-                            DNASequence, RNASequence, ProteinSequence]:
+        for constructor in [Sequence, DNA, RNA, Protein]:
             for invalid, kwargs, errors, etype in self.invalid_files:
                 with self.assertRaises(etype) as cm:
                     for kwarg in kwargs:
@@ -259,19 +344,38 @@ class TestQSeqToSequences(TestQSeqBase):
                     self.assertIn(e, str(cm.exception))
 
     def test_valid_files(self):
-        for constructor in [BiologicalSequence, NucleotideSequence,
-                            DNASequence, RNASequence, ProteinSequence]:
+        for constructor in [Sequence, DNA, RNA, Protein]:
             for valid, kwargs, components in self.valid_files:
-                for kwarg in kwargs:
-                    _drop_kwargs(kwarg, 'constructor', 'filter')
-
-                    seq_num = kwarg.get('seq_num', 1)
+                for observed_kwargs in kwargs:
+                    expected_kwargs = {}
+                    # Currently not validating the alphabet for qseq
+                    # files that are read in for this test.
+                    if hasattr(constructor, 'alphabet'):
+                        observed_kwargs['validate'] = False
+                        expected_kwargs['validate'] = False
+                    _drop_kwargs(observed_kwargs, 'constructor', 'filter')
+
+                    seq_num = observed_kwargs.get('seq_num', 1)
                     c = components[seq_num - 1]
-                    expected = constructor(c[1], id=c[0], quality=c[2])
-
-                    observed = read(valid, into=constructor, format='qseq',
-                                    verify=False, **kwarg)
-                    self.assertTrue(observed.equals(expected))
+                    expected = constructor(
+                        c['sequence'],
+                        metadata={'id': c['id'],
+                                  'machine_name': c['machine_name'],
+                                  'run_number': c['run_number'],
+                                  'lane_number': c['lane_number'],
+                                  'tile_number': c['tile_number'],
+                                  'x': c['x'],
+                                  'y': c['y'],
+                                  'index': c['index'],
+                                  'read_number': c['read_number']},
+                        positional_metadata={
+                            'quality': np.array(c['quality'], np.uint8)},
+                        **expected_kwargs)
+
+                    observed = read(valid, into=constructor,
+                                    format='qseq', verify=False,
+                                    **observed_kwargs)
+                    self.assertEqual(observed, expected)
 
 
 class TestQSeqSniffer(TestQSeqBase):
diff --git a/skbio/io/registry.py b/skbio/io/registry.py
new file mode 100644
index 0000000..0e18451
--- /dev/null
+++ b/skbio/io/registry.py
@@ -0,0 +1,1145 @@
+r"""
+I/O Registry (:mod:`skbio.io.registry`)
+=======================================
+
+.. currentmodule:: skbio.io.registry
+
+Classes
+-------
+
+.. autosummary::
+   :toctree: generated/
+
+   IORegistry
+   Format
+
+Functions
+---------
+
+.. autosummary::
+   :toctree: generated/
+
+   create_format
+
+Exceptions
+----------
+
+.. autosummary::
+   :toctree: generated/
+
+   DuplicateRegistrationError
+   InvalidRegistrationError
+
+
+Creating a new format for scikit-bio
+------------------------------------
+scikit-bio makes it simple to add new file formats to its I/O registry.
+scikit-bio maintains a singleton of the :class:`IORegistry` class called
+`io_registry`. This is where all scikit-bio file formats are registered. One
+could also instantiate their own :class:`IORegistry`, but that is not the focus
+of this tutorial.
+
+The first step to creating a new format is to add a submodule in
+`skbio/io/format/` named after the file format you are implementing.
+For example, if the format you are implementing is called `myformat` then you
+would create a file called `skbio/io/format/myformat.py`.
+
+The next step is to import the :func:`create_format` factory from
+:mod:`skbio.io`. This will allow you to create a new :class:`Format` object
+that `io_registry` will know about.
+
+Ideally you should name the result of :func:`create_format` as your file name.
+For example:
+
+.. code-block:: python
+
+   from skbio.io import create_format
+
+   myformat = create_format('myformat')
+
+The `myformat` object is what we will use to register our new functionality.
+At this point you should evaluate whether your format is binary or text.
+If your format is binary, your :func:`create_format` call should look like
+this:
+
+.. code-block:: python
+
+   myformat = create_format('myformat', encoding='binary')
+
+Alternatively if your format is text and has a specific encoding or newline
+handling you can also specify that:
+
+.. code-block:: python
+
+   myformat = create_format('myformat', encoding='ascii', newline='\n')
+
+This will ensure that our registry will open files with a default encoding of
+`'ascii'` for `'myformat'` and expect all newlines to be `'\n'` characters.
+
+Having worked out these details, we are ready to register the actual
+functionality of our format (e.g., sniffer, readers, and writers).
+
+To create a sniffer simply decorate the following onto your sniffer function:
+
+.. code-block:: python
+
+   @myformat.sniffer()
+   def _myformat_sniffer(fh):
+       # do something with `fh` to determine the membership of the file
+
+For further details on sniffer functions see :func:`Format.sniffer`.
+
+Creating a reader is very similar, but has one difference:
+
+.. code-block:: python
+
+   @myformat.reader(SomeSkbioClass)
+   def _myformat_to_some_skbio_class(fh, kwarg1='default', extra=FileSentinel):
+       # parse `fh` and return a SomeSkbioClass instance here
+       # `extra` will also be an open filehandle if provided else None
+
+Here we bound a function to a specific class. We also demonstrated using
+our FileSentinel object to indicate to the registry that this reader can take
+auxiliary files that should be handled in the same way as the primary file.
+For further details on reader functions see :func:`Format.reader`.
+
+Creating a writer is about the same:
+
+.. code-block:: python
+
+   @myformat.writer(SomeSkbioClass)
+   def _some_skbio_class_to_myformat(obj, fh, kwarg1='whatever',
+                                     extra=FileSentinel):
+       # write the contents of `obj` into `fh` and whatever else into `extra`
+       # do not return anything, it will be ignored
+
+This is exactly the same as the `reader` above just in reverse, we also
+receive the object we are writing as the first parameter instead of the file
+(which is the second one). For further details on writer functions see
+:func:`Format.writer`.
+
+.. note:: When raising errors in readers and writers, the error should be a
+   subclass of ``FileFormatError`` specific to your new format.
+
+Once you are satisfied with the functionality, you will need to ensure that
+`skbio/io/__init__.py` contains an import of your new submodule so the
+decorators are executed. Add the function
+``import_module('skbio.io.format.myformat')`` with your module name to the
+existing list.
+
+.. note:: Because scikit-bio handles all of the I/O boilerplate, you only need
+   to unit-test the actual business logic of your `readers`, `writers`, and
+   `sniffers`.
+
+Reserved Keyword Arguments
+--------------------------
+The following keyword args may not be used when defining new `readers` or
+`writers` as they already have special meaning to the registry system:
+
+- `format`
+- `into`
+- `verify`
+- `mode`
+- `encoding`
+- `errors`
+- `newline`
+- `compression`
+- `compresslevel`
+
+The following are not yet used but should be avoided as well:
+
+- `auth`
+- `user`
+- `password`
+- `buffering`
+- `buffer_size`
+- `closefd`
+- `exclusive`
+- `append`
+
+"""
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from warnings import warn
+import types
+import traceback
+import itertools
+import inspect
+from functools import wraps
+
+from future.builtins import zip
+
+from ._exception import DuplicateRegistrationError, InvalidRegistrationError
+from . import (UnrecognizedFormatError, ArgumentOverrideWarning,
+               FormatIdentificationWarning)
+from .util import _resolve_file, open_file, open_files, _d as _open_kwargs
+from skbio.util._misc import make_sentinel, find_sentinels
+from skbio.util._decorator import stable
+
+FileSentinel = make_sentinel("FileSentinel")
+
+
+class IORegistry(object):
+    """Create a registry of formats and implementations which map to classes.
+
+    """
+
+    @stable(as_of="0.4.0")
+    def __init__(self):
+        # This separation of binary and text formats is useful because there
+        # are many situations where we may have received a text-file. When this
+        # happens, the binary data fundamentally does not exist. We could
+        # assume encoding should be interpreted in reverse, however this misses
+        # the bigger point: why would the user ever want text to be treated as
+        # binary? They already went through the effort to hand us text.
+        # Therefore, during format resolution, we should skip the binary
+        # formats if they are irrelevant. (They are incompatible with such a
+        # filehandle anyways.)
+        self._binary_formats = {}
+        self._text_formats = {}
+        self._lookups = (self._binary_formats, self._text_formats)
+
+    @stable(as_of="0.4.0")
+    def create_format(self, *args, **kwargs):
+        """A simple factory for creating new file formats.
+
+        This will automatically register the format with this registry.
+
+        All arguments are passed through to the Format constructor.
+
+        Returns
+        -------
+        Format
+            A new format that is registered with the registry.
+
+        """
+        format = Format(*args, **kwargs)
+        self.add_format(format)
+        return format
+
+    @stable(as_of="0.4.0")
+    def add_format(self, format_object):
+        """Add a format to the registry.
+
+        Parameters
+        ----------
+        format_object : Format
+            The format to add to the registry.
+
+        """
+        # See comment in the constructor for an explanation for why this split
+        # occurs.
+        name = format_object.name
+        if name in self._binary_formats or name in self._text_formats:
+            raise DuplicateRegistrationError("A format already exists with"
+                                             " that name: %s" % name)
+
+        if format_object.is_binary_format:
+            self._binary_formats[name] = format_object
+        else:
+            self._text_formats[name] = format_object
+
+    @stable(as_of="0.4.0")
+    def get_sniffer(self, format_name):
+        """Locate the sniffer for a format.
+
+        Parameters
+        ----------
+        format_name : str
+            The name of the format to lookup.
+
+        Returns
+        -------
+        function or None
+            The sniffer associated with `format_name`
+
+        """
+        for lookup in self._lookups:
+            if format_name in lookup:
+                return lookup[format_name].sniffer_function
+        return None
+
+    @stable(as_of="0.4.0")
+    def get_reader(self, format_name, cls):
+        """Locate the reader for a format and class.
+
+        Parameters
+        ----------
+        format_name : str
+            The name of the format to lookup.
+        cls : type or None
+            The class which the reader will return an instance of. If `cls` is
+            None, the reader will return a generator.
+            Default is None.
+
+        Returns
+        -------
+        function or None
+            The reader associated with `format_name` and `cls`
+
+        """
+        return self._get_rw(format_name, cls, 'readers')
+
+    @stable(as_of="0.4.0")
+    def get_writer(self, format_name, cls):
+        """Locate the writer for a format and class.
+
+        Parameters
+        ----------
+        format_name : str
+            The name of the format to lookup.
+        cls : type or None
+            The class which the writer will expect an instance of. If `cls` is
+            None, the writer will expect a generator.
+            Default is None.
+
+        Returns
+        -------
+        function or None
+            The writer associated with `format_name` and `cls`
+
+        """
+        return self._get_rw(format_name, cls, 'writers')
+
+    def _get_rw(self, format_name, cls, lookup_name):
+        for lookup in self._lookups:
+            if format_name in lookup:
+                format_lookup = getattr(lookup[format_name], lookup_name)
+                if cls in format_lookup:
+                    return format_lookup[cls]
+        return None
+
+    @stable(as_of="0.4.0")
+    def list_read_formats(self, cls):
+        """Return a list of available read formats for a given `cls` type.
+
+        Parameters
+        ----------
+        cls : type
+            The class which will be used to determine what read formats exist
+            for an instance of `cls`.
+
+        Returns
+        -------
+        list
+            A list of available read formats for an instance of `cls`. List may
+            be empty.
+
+        """
+        return list(self._iter_rw_formats(cls, 'readers'))
+
+    @stable(as_of="0.4.0")
+    def list_write_formats(self, cls):
+        """Return a list of available write formats for a given `cls` type.
+
+        Parameters
+        ----------
+        cls : type
+            The class which will be used to determine what write formats exist
+            for an instance of `cls`.
+
+        Returns
+        -------
+        list
+            A list of available write formats for an instance of `cls`. List
+            may be empty.
+
+        """
+        return list(self._iter_rw_formats(cls, 'writers'))
+
+    def _iter_rw_formats(self, cls, lookup_name):
+        for lookup in self._lookups:
+            for format in lookup.values():
+                if cls in getattr(format, lookup_name):
+                    yield format.name
+
+    @stable(as_of="0.4.0")
+    def sniff(self, file, **kwargs):
+        """Detect the format of a given `file` and suggest kwargs for reading.
+
+        Parameters
+        ----------
+        file : openable (filepath, URL, filehandle, etc.)
+            The file to sniff. Something that is understood by `skbio.io.open`.
+        kwargs : dict, optional
+            Keyword arguments will be passed to `skbio.io.open`.
+
+        Returns
+        -------
+        (str, dict)
+            The name of the format of the file and any suggested kwargs for
+            use with the corresponding reader.
+
+        Raises
+        ------
+        UnrecognizedFormatError
+            This occurs when the format is not 'claimed' by any registered
+            sniffer or when the format is ambiguous and has been 'claimed' by
+            more than one sniffer.
+
+        """
+        # By resolving the input here, we have the opportunity to reuse the
+        # file (which is potentially ephemeral). Each sniffer will also resolve
+        # the file, but that call will short-circuit and won't claim
+        # responsibility for closing the file. This means that the file
+        # should only close after leaving this context. This is also the reason
+        # that we have to use SaneTextIOWrapper because each sniffer will
+        # wrap the file to produce an appropriate default encoding for their
+        # format (if unspecified). This results in the SaneTextIOWrapper being
+        # garbage collected (using io.TextIOBase results in close being called
+        # on our buffer by the destructor which we wanted to share with the
+        # next sniffer)
+        with _resolve_file(file, mode='r', **kwargs) as (fh, _,
+                                                         is_binary_file):
+            # tell may fail noisily if the user provided a TextIOBase or
+            # BufferedReader which has already been iterated over (via next()).
+            matches = []
+            backup = fh.tell()
+            if is_binary_file and kwargs.get('encoding', 'binary') == 'binary':
+                matches = self._find_matches(fh, self._binary_formats,
+                                             **kwargs)
+
+            if kwargs.get('encoding', None) != 'binary':
+                # We can always turn a binary file into a text file, but the
+                # reverse doesn't make sense.
+                matches += self._find_matches(fh, self._text_formats, **kwargs)
+                fh.seek(backup)
+            elif not is_binary_file:
+                raise ValueError("Cannot decode text source (%r) as binary."
+                                 % file)
+            # else we are a binary_file and our encoding did not exclude binary
+            # so we have already handled that condition
+
+        if len(matches) > 1:
+            raise UnrecognizedFormatError("File format for %r is ambiguous,"
+                                          " may be one of: %r"
+                                          % (file, [m for m, s in matches]))
+        elif len(matches) == 0:
+            raise UnrecognizedFormatError("Could not detect the format of %r"
+                                          % file)
+
+        return matches[0]
+
+    def _find_matches(self, file, lookup, **kwargs):
+        matches = []
+        for format in lookup.values():
+            if format.sniffer_function is not None:
+                is_format, skwargs = format.sniffer_function(file, **kwargs)
+                file.seek(0)
+                if is_format:
+                    matches.append((format.name, skwargs))
+        return matches
+
+    @stable(as_of="0.4.0")
+    def read(self, file, format=None, into=None, verify=True, **kwargs):
+        """Read `file` as `format` into an object.
+
+        Parameters
+        ----------
+        file : openable (filepath, URL, filehandle, etc.)
+            The file to read. Something that is understood by `skbio.io.open`.
+        format : str, optional
+            The format of the file if known. If None, the format will be
+            inferred from the file.
+        into : type or None, optional
+            The object which will be returned. If None, a generator will be
+            returned.
+        verify : bool, optional
+            When True, will double check the `format` if provided.
+        kwargs : dict, optional
+            Keyword arguments will be passed to their respective handlers
+            (`skbio.io.open` and the reader for `format`)
+
+        Returns
+        -------
+        object or generator
+            An instance of `into` if `into` is not None else generator
+
+        Raises
+        ------
+        ValueError
+            Raised when `format` and `into` are both None.
+        UnrecognizedFormatError
+            Raised when a reader could not be found for a given `format` or the
+            format could not be guessed.
+        FormatIdentificationWarning
+            Raised when `verify` is True and the sniffer of a `format` did
+            not agree that `file` is a member of `format`
+        ArgumentOverrideWarning
+            Raised when `verify` is True and a user-supplied argument is
+            overriding the suggestion provided by the sniffer of `format`.
+
+        """
+        # Context managers do not compose well with generators. We have to
+        # duplicate the logic so that the file will stay open while yielding.
+        # Otherwise the context exits as soon as the generator is returned
+        # (making any iteration fail as the file is closed from its
+        # perspective).
+        if into is None:
+            if format is None:
+                raise ValueError("`into` and `format` cannot both be None")
+            gen = self._read_gen(file, format, into, verify, kwargs)
+            # This is done so that any errors occur immediately instead of
+            # on the first call from __iter__
+            # eta-reduction is possible, but we want to the type to be
+            # GeneratorType
+            return (x for x in itertools.chain([next(gen)], gen))
+        else:
+            return self._read_ret(file, format, into, verify, kwargs)
+
+    def _read_ret(self, file, fmt, into, verify, kwargs):
+        io_kwargs = self._find_io_kwargs(kwargs)
+        with _resolve_file(file, **io_kwargs) as (file, _, _):
+            reader, kwargs = self._init_reader(file, fmt, into, verify, kwargs,
+                                               io_kwargs)
+            return reader(file, **kwargs)
+
+    def _read_gen(self, file, fmt, into, verify, kwargs):
+        io_kwargs = self._find_io_kwargs(kwargs)
+        # We needed to get the io_kwargs from kwargs for things like
+        # _resolve_file and for verifying a format.
+        # kwargs should still retain the contents of io_kwargs because the
+        # actual reader will also need them.
+        with _resolve_file(file, **io_kwargs) as (file, _, _):
+            reader, kwargs = self._init_reader(file, fmt, into, verify, kwargs,
+                                               io_kwargs)
+            generator = reader(file, **kwargs)
+            while True:
+                yield next(generator)
+
+    def _find_io_kwargs(self, kwargs):
+        return {k: kwargs[k] for k in _open_kwargs if k in kwargs}
+
+    def _init_reader(self, file, fmt, into, verify, kwargs, io_kwargs):
+        skwargs = {}
+        if fmt is None:
+            fmt, skwargs = self.sniff(file, **io_kwargs)
+        elif verify:
+            sniffer = self.get_sniffer(fmt)
+            if sniffer is not None:
+                backup = file.tell()
+                is_format, skwargs = sniffer(file, **io_kwargs)
+                file.seek(backup)
+                if not is_format:
+                    warn("%r does not look like a %s file"
+                         % (file, fmt), FormatIdentificationWarning)
+
+        for key in skwargs:
+            if key not in kwargs:
+                kwargs[key] = skwargs[key]
+            elif kwargs[key] != skwargs[key]:
+                warn('Best guess was: %s=%r, continuing with user'
+                     ' supplied: %r' % (key, skwargs[key],
+                                        kwargs[key]),
+                     ArgumentOverrideWarning)
+
+        reader = self.get_reader(fmt, into)
+        if reader is None:
+            raise UnrecognizedFormatError(
+                "Cannot read %r from %r, no %s reader found." %
+                (fmt, file, into.__name__ if into else 'generator'))
+        return reader, kwargs
+
+    @stable(as_of="0.4.0")
+    def write(self, obj, format, into, **kwargs):
+        """Write `obj` as `format` into a file.
+
+        Parameters
+        ----------
+        obj : object
+            The object to write as `format`
+        format : str
+            The format to write `obj` as
+        into : openable (filepath, URL, filehandle, etc.)
+            What to write `obj` to. Something that is understood by
+            `skbio.io.open`.
+        kwargs : dict, optional
+            Keyword arguments will be passed to their respective handlers
+            (`skbio.io.open` and the writer for `format`)
+
+        Returns
+        -------
+        openable (filepath, URL, filehandle, etc.)
+            Will pass back the user argument for `into` as a convenience.
+
+        Raises
+        ------
+        UnrecognizedFormatError
+            Raised when a writer for writing `obj` as `format` could not be
+            found.
+
+        """
+        # The simplest functionality here.
+        cls = None
+        if not isinstance(obj, types.GeneratorType):
+            cls = obj.__class__
+        writer = self.get_writer(format, cls)
+        if writer is None:
+            raise UnrecognizedFormatError(
+                "Cannot write %r into %r, no %s writer found." %
+                (format, into, obj.__class__.__name__))
+
+        writer(obj, into, **kwargs)
+        return into
+
+    @stable(as_of="0.4.0")
+    def monkey_patch(self):
+        """Monkey-patch `read` and `write` methods onto registered classes.
+
+        Will modify classes which have been registered to a reader or writer
+        to have `read` and `write` methods which will contain documentation
+        specifying usable formats for that class.
+
+        The actual functionality will be a pass-through to `skbio.io.read`
+        and `skbio.io.write` respectively.
+        """
+        reads = set()
+        writes = set()
+        for lookup in self._lookups:
+            for format in lookup.values():
+                reads |= format.monkey_patched_readers
+                writes |= format.monkey_patched_writers
+
+        for cls in reads:
+            self._apply_read(cls)
+
+        for cls in writes:
+            self._apply_write(cls)
+
+    def _apply_read(registry, cls):
+        """Add read method if any formats have a reader for `cls`."""
+        read_formats = registry.list_read_formats(cls)
+
+        @classmethod
+        def read(cls, file, format=None, **kwargs):
+            return registry.read(file, into=cls, format=format, **kwargs)
+
+        imports = registry._import_paths(read_formats)
+        doc_list = registry._formats_for_docs(read_formats, imports)
+        read.__func__.__doc__ = _read_docstring % {
+            'name': cls.__name__,
+            'list': doc_list,
+            'see': '\n'.join(imports)
+        }
+        cls.read = read
+
+    def _apply_write(registry, cls):
+        """Add write method if any formats have a writer for `cls`."""
+        write_formats = registry.list_write_formats(cls)
+        if not hasattr(cls, 'default_write_format'):
+            raise NotImplementedError(
+                "Classes with registered writers must provide a "
+                "`default_write_format`. Please add `default_write_format`"
+                " to '%s'." % cls.__name__)
+
+        def write(self, file, format=cls.default_write_format, **kwargs):
+            return registry.write(self, into=file, format=format, **kwargs)
+
+        imports = registry._import_paths(write_formats)
+        doc_list = registry._formats_for_docs(write_formats, imports)
+        write.__doc__ = _write_docstring % {
+            'name': cls.__name__,
+            'list': doc_list,
+            'see': '\n'.join(imports),
+            'default': cls.default_write_format
+        }
+
+        cls.write = write
+
+    def _import_paths(self, formats):
+        lines = []
+        for fmt in formats:
+            lines.append("skbio.io.format." + fmt)
+        return lines
+
+    def _formats_for_docs(self, formats, imports):
+        lines = []
+        for fmt, imp in zip(formats, imports):
+            lines.append("- ``'%s'`` (:mod:`%s`)" % (fmt, imp))
+        return '\n'.join(lines)
+
+
# Template for the docstring attached to the monkey-patched ``read``
# classmethod. The %(name)s, %(list)s and %(see)s placeholders are filled in
# per class by IORegistry when it binds the method. NOTE: this string is
# runtime behavior (it becomes user-visible documentation), so its text must
# not be reworded casually.
_read_docstring = """Create a new ``%(name)s`` instance from a file.

This is a convenience method for :func:`skbio.io.registry.read`. For
more information about the I/O system in scikit-bio, please see
:mod:`skbio.io`.

Supported file formats include:

%(list)s

Parameters
----------
file : openable (filepath, URL, filehandle, etc.)
    The location to read the given `format`. Something that is
    understood by :func:`skbio.io.util.open`. Filehandles are not
    automatically closed, it is the responsibility of the caller.
format : str, optional
    The format must be a format name with a reader for ``%(name)s``.
    If a `format` is not provided or is None, it will attempt to
    guess the format.
kwargs : dict, optional
    Keyword arguments passed to :func:`skbio.io.registry.read` and
    the file format reader for ``%(name)s``.

Returns
-------
%(name)s
    A new instance.

See Also
--------
write
skbio.io.registry.read
skbio.io.util.open
%(see)s

"""
+
# Template for the docstring attached to the monkey-patched ``write`` method.
# The %(name)s, %(list)s, %(see)s and %(default)s placeholders are filled in
# per class by IORegistry when it binds the method. NOTE: this string is
# runtime behavior (it becomes user-visible documentation), so its text must
# not be reworded casually.
_write_docstring = """Write an instance of ``%(name)s`` to a file.

This is a convenience method for :func:`skbio.io.registry.write`.
For more information about the I/O system in scikit-bio, please
see :mod:`skbio.io`.

Supported file formats include:

%(list)s

Parameters
----------
file : openable (filepath, URL, filehandle, etc.)
    The location to write the given `format` into.  Something
    that is understood by :func:`skbio.io.util.open`. Filehandles
    are not automatically closed, it is the responsibility of the
    caller.
format : str
    The format must be a registered format name with a writer for
    ``%(name)s``.
    Default is `'%(default)s'`.
kwargs : dict, optional
    Keyword arguments passed to :func:`skbio.io.registry.write`
    and the file format writer.

See Also
--------
read
skbio.io.registry.write
skbio.io.util.open
%(see)s

"""
+
+
class Format(object):
    """Defines a format on which readers/writers/sniffer can be registered.

    Parameters
    ----------
    name : str
        The name of this format.
    encoding : str, optional
        What the default encoding of this format is. If set to 'binary' then
        all registered handlers will receive an :class:`io.BufferedReader` or
        :class:`io.BufferedWriter` instead of an :class:`io.TextIOBase`. The
        user will also be unable to override the encoding in that case.
    newline : str, optional
        What the default newline handling of this format is. Default is to use
        universal newline handling.

    """
    @property
    @stable(as_of="0.4.0")
    def name(self):
        """The name of this format."""
        return self._name

    @property
    @stable(as_of="0.4.0")
    def is_binary_format(self):
        """Return True if this is a binary format."""
        return self._encoding == 'binary'

    @property
    @stable(as_of="0.4.0")
    def sniffer_function(self):
        """The sniffer function associated with this format."""
        return self._sniffer_function

    @property
    @stable(as_of="0.4.0")
    def readers(self):
        """Dictionary that maps classes to their readers for this format."""
        return self._readers

    @property
    @stable(as_of="0.4.0")
    def writers(self):
        """Dictionary that maps classes to their writers for this format."""
        return self._writers

    @property
    @stable(as_of="0.4.0")
    def monkey_patched_readers(self):
        """Set of classes bound to readers to monkey patch."""
        return self._monkey_patch['read']

    @property
    @stable(as_of="0.4.0")
    def monkey_patched_writers(self):
        """Set of classes bound to writers to monkey patch."""
        return self._monkey_patch['write']

    def __init__(self, name, encoding=None, newline=None):
        self._encoding = encoding
        self._newline = newline
        self._name = name

        self._sniffer_function = None
        self._readers = {}
        self._writers = {}
        # Classes opting in to having read/write methods attached by an
        # IORegistry, keyed by operation.
        self._monkey_patch = {'read': set(), 'write': set()}

    @stable(as_of="0.4.0")
    def sniffer(self, override=False):
        """Decorate a function to act as the sniffer for this format.

        The function should take one argument which will be an implementation
        of either :class:`io.TextIOBase` or :class:`io.BufferedReader`
        depending on if the format is text or binary, respectively.

        The sniffer will always receive a filehandle which is pointing to the
        beginning of the file. It must return a tuple of bool and a dict of
        suggested keyword arguments (if any) to pass to the reader.

        .. note:: Keyword arguments are not permitted in `sniffers`.
           `Sniffers` may not raise exceptions; if an exception is thrown by a
           `sniffer`, the user will be asked to report it on our `issue tracker
           <https://github.com/biocore/scikit-bio/issues/>`_.


        Parameters
        ----------
        override : bool, optional
            If True, the existing sniffer will be overridden.

        Raises
        ------
        DuplicateRegistrationError
            When `override` is False and a sniffer is already registered for
            this format.

        Examples
        --------
        >>> from skbio.io.registry import Format
        >>> # If developing a new format for skbio, use the create_format()
        >>> # factory instead of this constructor.
        >>> myformat = Format('myformat')
        >>> @myformat.sniffer()
        ... def myformat_sniffer(fh):
        ...     check = fh.read(8) == "myformat"
        ...     if check:
        ...         version = int(fh.read(1))
        ...         return True, {'version': version}
        ...     return False, {}
        ...
        >>> myformat_sniffer([u"myformat2\\n", u"some content\\n"])
        (True, {'version': 2})
        >>> myformat_sniffer([u"something else\\n"])
        (False, {})

        """
        if not isinstance(override, bool):
            raise InvalidRegistrationError("`override` must be a bool not %r"
                                           % override)

        if not override and self._sniffer_function is not None:
            raise DuplicateRegistrationError("A sniffer is already registered"
                                             " to format: %s" % self._name)

        def decorator(sniffer):
            @wraps(sniffer)
            def wrapped_sniffer(file, encoding=self._encoding, errors='ignore',
                                newline=self._newline, **kwargs):
                self._validate_encoding(encoding)
                if encoding == 'binary':
                    # Errors is irrelevant so set to default to prevent raising
                    # a usage exception in open.
                    errors = _open_kwargs['errors']
                with open_file(file, mode='r', encoding=encoding,
                               newline=newline, errors=errors, **kwargs) as fh:
                    try:
                        # Some formats may have headers which indicate their
                        # format sniffers should be able to rely on the
                        # filehandle to point at the beginning of the file.
                        fh.seek(0)
                        return sniffer(fh)
                    except UnicodeDecodeError:
                        # Text sniffer fed undecodable bytes: simply not this
                        # format; fall through to the (False, {}) result.
                        pass
                    except Exception:
                        warn("'%s' has encountered a problem.\nPlease"
                             " send the following to our issue tracker at\n"
                             "https://github.com/biocore/scikit-bio/issues\n\n"
                             "%s" % (sniffer.__name__, traceback.format_exc()),
                             FormatIdentificationWarning)

                    return False, {}

            self._sniffer_function = wrapped_sniffer
            return wrapped_sniffer
        return decorator

    @stable(as_of="0.4.0")
    def reader(self, cls, monkey_patch=True, override=False):
        """Decorate a function to act as the reader for a class in this format.

        The function should take an argument which will be an implementation
        of either :class:`io.TextIOBase` or :class:`io.BufferedReader`
        depending on if the format is text or binary, respectively. Any kwargs
        given by the user which are not handled by :func:`skbio.io.util.open`
        will be passed into the function. Any kwarg with a default of
        `FileSentinel` will transform user input for that parameter into a
        filehandle or `None` if not provided.

        Parameters
        ----------
        cls : type or None
            The class which the function will be registered to handle. If
            None, it is assumed that the function will produce a generator.
        monkey_patch : bool, optional
            Whether to allow an IORegistry to attach a `read` method to `cls`
            with this format listed as an option.
        override : bool, optional
            If True, any existing readers for `cls` in this format will be
            overridden.

        Raises
        ------
        DuplicateRegistrationError
            When `override` is False and a reader is already registered to
            `cls` for this format.

        Examples
        --------
        >>> from skbio.io.registry import Format, IORegistry
        >>> registry = IORegistry()
        >>> myformat = Format('myformat')
        >>> registry.add_format(myformat)
        >>> # If developing a new format for skbio, use the create_format()
        >>> # factory instead of the above.
        >>> class MyObject(object):
        ...     def __init__(self, content):
        ...         self.content = content
        ...
        >>> @myformat.reader(MyObject)
        ... def myformat_reader(fh):
        ...     return MyObject(fh.readlines()[1:])
        ...
        >>> registry.monkey_patch() # If developing skbio, this isn't needed
        >>> MyObject.read([u"myformat2\\n", u"some content here!\\n"],
        ...               format='myformat').content
        [u'some content here!\\n']

        """
        self._check_registration(cls)

        def decorator(reader_function):
            file_params = find_sentinels(reader_function, FileSentinel)
            # This split has to occur for the same reason as in IORegistry.read
            if cls is not None:

                @wraps(reader_function)
                def wrapped_reader(file, encoding=self._encoding,
                                   newline=self._newline, **kwargs):
                    file_keys, files, io_kwargs = self._setup_locals(
                        file_params, file, encoding, newline, kwargs)
                    with open_files(files, mode='r', **io_kwargs) as fhs:
                        # The primary file is at the end of fh because append
                        # is cheaper than insert
                        kwargs.update(zip(file_keys, fhs[:-1]))
                        return reader_function(fhs[-1], **kwargs)
            else:

                @wraps(reader_function)
                def wrapped_reader(file, encoding=self._encoding,
                                   newline=self._newline, **kwargs):
                    file_keys, files, io_kwargs = self._setup_locals(
                        file_params, file, encoding, newline, kwargs)
                    with open_files(files, mode='r', **io_kwargs) as fhs:
                        kwargs.update(zip(file_keys, fhs[:-1]))
                        generator = reader_function(fhs[-1], **kwargs)
                        # Delegate with a plain loop rather than
                        # `while True: yield next(generator)`: under PEP 479
                        # (Python 3.7+) a StopIteration escaping next() inside
                        # a generator is turned into a RuntimeError. The loop
                        # is behaviorally identical on earlier versions.
                        for item in generator:
                            yield item

            self._add_reader(cls, wrapped_reader, monkey_patch, override)
            return wrapped_reader
        return decorator

    @stable(as_of="0.4.0")
    def writer(self, cls, monkey_patch=True, override=False):
        """Decorate a function to act as the writer for a class in this format.

        The function should take an instance of `cls` as its first argument
        and the second argument is a filehandle which will be an implementation
        of either :class:`io.TextIOBase` or :class:`io.BufferedWriter`
        depending on if the format is text or binary, respectively. Any kwargs
        given by the user which are not handled by :func:`skbio.io.util.open`
        will be passed into the function. Any kwarg with a default of
        `FileSentinel` will transform user input for that parameter into a
        filehandle or `None` if not provided.

        Parameters
        ----------
        cls : type or None
            The class which the function will be registered to handle. If
            None, it is assumed that the function will consume a generator.
        monkey_patch : bool, optional
            Whether to allow an IORegistry to attach a `write` method to `cls`
            with this format listed as an option.
        override : bool, optional
            If True, any existing writers for `cls` in this format will be
            overridden.

        Raises
        ------
        DuplicateRegistrationError
            When `override` is False and a writer is already registered to
            `cls` for this format.

        Examples
        --------
        >>> from skbio.io.registry import Format, IORegistry
        >>> registry = IORegistry()
        >>> myformat = Format('myformat')
        >>> registry.add_format(myformat)
        >>> # If developing a new format for skbio, use the create_format()
        >>> # factory instead of the above.
        >>> class MyObject(object):
        ...     default_write_format = 'myformat'
        ...     def __init__(self, content):
        ...         self.content = content
        ...
        >>> @myformat.writer(MyObject)
        ... def myformat_reader(obj, fh):
        ...     fh.write(u"myformat2\\n")
        ...     for c in obj.content:
        ...         fh.write(c)
        ...
        >>> registry.monkey_patch() # If developing skbio, this isn't needed
        >>> obj = MyObject([u"some content here!\\n"])
        >>> obj.write([], format='myformat')
        [u'myformat2\\n', u'some content here!\\n']

        """
        self._check_registration(cls)

        def decorator(writer_function):
            file_params = find_sentinels(writer_function, FileSentinel)

            @wraps(writer_function)
            def wrapped_writer(obj, file, encoding=self._encoding,
                               newline=self._newline, **kwargs):
                file_keys, files, io_kwargs = self._setup_locals(
                    file_params, file, encoding, newline, kwargs)
                with open_files(files, mode='w', **io_kwargs) as fhs:
                    kwargs.update(zip(file_keys, fhs[:-1]))
                    writer_function(obj, fhs[-1], **kwargs)

            self._add_writer(cls, wrapped_writer, monkey_patch, override)
            return wrapped_writer
        return decorator

    def _check_registration(self, cls):
        # Readers/writers may be bound to a class or, for generators, to None.
        if cls is not None and not inspect.isclass(cls):
            raise InvalidRegistrationError("`cls` must be a class or None, not"
                                           " %r" % cls)

    def _setup_locals(self, file_params, file, encoding, newline, kwargs):
        """Split user kwargs into open() kwargs and FileSentinel files."""
        self._validate_encoding(encoding)
        io_kwargs = self._pop_io_kwargs(kwargs, encoding, newline)
        file_keys, files = self._setup_file_args(kwargs, file_params)
        # The primary file goes last; wrappers rely on fhs[-1] being it.
        files.append(file)

        return file_keys, files, io_kwargs

    def _validate_encoding(self, encoding):
        # Binary formats must use (exactly) the 'binary' pseudo-encoding;
        # text formats must not.
        if encoding != self._encoding:
            if self._encoding == 'binary':
                raise ValueError("Encoding must be 'binary' for %r"
                                 % self.name)
            if encoding == 'binary':
                raise ValueError("Encoding must not be 'binary' for %r"
                                 % self.name)

    def _pop_io_kwargs(self, kwargs, encoding, newline):
        """Remove skbio.io.util.open kwargs from `kwargs` and return them."""
        io_kwargs = dict(encoding=encoding, newline=newline)
        for key in _open_kwargs:
            if key in kwargs:
                io_kwargs[key] = kwargs.pop(key)
        return io_kwargs

    def _setup_file_args(self, kwargs, file_params):
        """Collect user-supplied FileSentinel arguments to be opened."""
        file_keys = []
        files = []
        for param in file_params:
            arg = kwargs.get(param, None)
            if arg is not None:
                file_keys.append(param)
                files.append(arg)
            else:
                # set to None to mask FileSentinel when user neglected argument
                kwargs[param] = None

        return file_keys, files

    def _add_writer(self, cls, writer, monkey_patch, override):
        if cls in self._writers and not override:
            raise DuplicateRegistrationError("There is already a writer"
                                             " registered to %s in format: %s"
                                             % (cls, self._name))
        self._writers[cls] = writer
        if monkey_patch and cls is not None:
            self._monkey_patch['write'].add(cls)

    def _add_reader(self, cls, reader, monkey_patch, override):
        if cls in self._readers and not override:
            raise DuplicateRegistrationError("There is already a reader"
                                             " registered to %s in format: %s"
                                             % (cls, self._name))
        self._readers[cls] = reader
        if monkey_patch and cls is not None:
            self._monkey_patch['read'].add(cls)
+
+
+io_registry = IORegistry()
+
+
@wraps(IORegistry.sniff)
def sniff(file, **kwargs):
    # Module-level convenience function delegating to the singleton registry;
    # @wraps copies the registry method's docstring and metadata.
    result = io_registry.sniff(file, **kwargs)
    return result
+
+
@wraps(IORegistry.read)
def read(file, format=None, into=None, verify=True, **kwargs):
    # Module-level convenience function delegating to the singleton registry;
    # @wraps copies the registry method's docstring and metadata.
    result = io_registry.read(file, format=format, into=into,
                              verify=verify, **kwargs)
    return result
+
+
@wraps(IORegistry.write)
def write(obj, format, into, **kwargs):
    # Module-level convenience function delegating to the singleton registry;
    # @wraps copies the registry method's docstring and metadata.
    return io_registry.write(obj, format=format, into=into, **kwargs)
+
+
# Module-level convenience wrapper so format modules can simply call
# ``skbio.io.registry.create_format(...)`` against the singleton registry.
@wraps(IORegistry.create_format)
def create_format(*args, **kwargs):
    return io_registry.create_format(*args, **kwargs)
diff --git a/skbio/io/tests/__init__.py b/skbio/io/tests/__init__.py
index 0bf0c55..3fe3dc6 100644
--- a/skbio/io/tests/__init__.py
+++ b/skbio/io/tests/__init__.py
@@ -5,3 +5,5 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
diff --git a/skbio/io/tests/data/big5_file b/skbio/io/tests/data/big5_file
new file mode 100644
index 0000000..c1927d0
--- /dev/null
+++ b/skbio/io/tests/data/big5_file
@@ -0,0 +1 @@
+�A�n
diff --git a/skbio/io/tests/data/big5_file.bz2 b/skbio/io/tests/data/big5_file.bz2
new file mode 100644
index 0000000..3b9ad9f
Binary files /dev/null and b/skbio/io/tests/data/big5_file.bz2 differ
diff --git a/skbio/io/tests/data/big5_file.gz b/skbio/io/tests/data/big5_file.gz
new file mode 100644
index 0000000..223286b
Binary files /dev/null and b/skbio/io/tests/data/big5_file.gz differ
diff --git a/skbio/io/tests/data/example_file b/skbio/io/tests/data/example_file
new file mode 100644
index 0000000..65befad
--- /dev/null
+++ b/skbio/io/tests/data/example_file
@@ -0,0 +1,2 @@
+This is some content
+It occurs on more than one line
diff --git a/skbio/io/tests/data/example_file.bz2 b/skbio/io/tests/data/example_file.bz2
new file mode 100644
index 0000000..44c3a5e
Binary files /dev/null and b/skbio/io/tests/data/example_file.bz2 differ
diff --git a/skbio/io/tests/data/example_file.gz b/skbio/io/tests/data/example_file.gz
new file mode 100644
index 0000000..0a5e3fc
Binary files /dev/null and b/skbio/io/tests/data/example_file.gz differ
diff --git a/skbio/io/tests/data/fasta_single_nuc_seq_defaults b/skbio/io/tests/data/fasta_single_nuc_seq_defaults
deleted file mode 100644
index 05481e6..0000000
--- a/skbio/io/tests/data/fasta_single_nuc_seq_defaults
+++ /dev/null
@@ -1,2 +0,0 @@
->f_o_o b a r
-ACGTU
diff --git a/skbio/io/tests/data/fasta_single_nuc_seq_non_defaults b/skbio/io/tests/data/fasta_single_nuc_seq_non_defaults
deleted file mode 100644
index e8a4072..0000000
--- a/skbio/io/tests/data/fasta_single_nuc_seq_non_defaults
+++ /dev/null
@@ -1,6 +0,0 @@
->f-o-o b_a_r
-A
-C
-G
-T
-U
diff --git a/skbio/io/tests/data/qual_single_seq b/skbio/io/tests/data/qual_single_seq
deleted file mode 100644
index 967dc14..0000000
--- a/skbio/io/tests/data/qual_single_seq
+++ /dev/null
@@ -1,2 +0,0 @@
->seq1 desc1
-10 20 30 10 0 0 0 88888 1 3456
diff --git a/skbio/io/tests/test_iosources.py b/skbio/io/tests/test_iosources.py
new file mode 100644
index 0000000..50cbd29
--- /dev/null
+++ b/skbio/io/tests/test_iosources.py
@@ -0,0 +1,53 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import unittest
+
+from skbio.io._iosources import IOSource, Compressor
+
+
class TestIOSource(unittest.TestCase):
    """Exercise the default (abstract) behaviour of the IOSource base."""

    def setUp(self):
        # Fixture values; TestCompressor reuses these via inheritance.
        self.options = {'a': 1, 'b': 2}
        self.file = 'somepath'
        self.source = IOSource(self.file, self.options)

    def test_attributes(self):
        # Constructor arguments are stored verbatim.
        self.assertEqual(self.file, self.source.file)
        self.assertEqual(self.options, self.source.options)

    def test_can_read(self):
        # The base source advertises no read capability.
        self.assertEqual(False, self.source.can_read())

    def test_can_write(self):
        # The base source advertises no write capability.
        self.assertEqual(False, self.source.can_write())

    def test_get_reader(self):
        # Base class leaves reader construction to subclasses.
        with self.assertRaises(NotImplementedError):
            self.source.get_reader()

    def test_get_writer(self):
        # Base class leaves writer construction to subclasses.
        with self.assertRaises(NotImplementedError):
            self.source.get_writer()
+
+
class TestCompressor(TestIOSource):
    """Compressor inherits IOSource's behaviour but must be writable."""

    def setUp(self):
        # Reuse the base fixture (self.file / self.options) for the Compressor.
        super(TestCompressor, self).setUp()
        self.compressor = Compressor(self.file, self.options)

    def test_can_write(self):
        # Overrides the inherited expectation: compressors can always write.
        self.assertEqual(self.compressor.can_write(), True)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/skbio/io/tests/test_registry.py b/skbio/io/tests/test_registry.py
index 0b7e4b5..376a713 100644
--- a/skbio/io/tests/test_registry.py
+++ b/skbio/io/tests/test_registry.py
@@ -7,23 +7,24 @@
 # ----------------------------------------------------------------------------
 
 from __future__ import absolute_import, division, print_function
-try:
-    # future >= 0.12
-    from future.backports.test.support import import_fresh_module
-except ImportError:
-    from future.standard_library.test.support import import_fresh_module
+from six.moves import zip_longest
+
 from io import StringIO
+import io
 import os
-
 import unittest
 import warnings
+import types
 from tempfile import mkstemp
 
-from skbio.io import (DuplicateRegistrationError, FormatIdentificationWarning,
-                      InvalidRegistrationError, UnrecognizedFormatError,
-                      ArgumentOverrideWarning)
-from skbio.io._registry import empty_file_sniffer
+from skbio.io import (FormatIdentificationWarning, UnrecognizedFormatError,
+                      ArgumentOverrideWarning, io_registry, sniff,
+                      create_format)
+from skbio.io.registry import (IORegistry, FileSentinel, Format,
+                               DuplicateRegistrationError,
+                               InvalidRegistrationError)
 from skbio.util import TestingUtilError, get_data_path
+from skbio import DNA, read, write
 
 
 class TestClass(object):
@@ -47,12 +48,18 @@ class TestClassB(TestClass):
     pass
 
 
class TestFormatAndIORegistry(unittest.TestCase):
    """Interaction between Format objects and an IORegistry."""

    def test_add_duplicate_format(self):
        # Registering a second format under an existing name must fail loudly.
        f = Format('Example')
        r = IORegistry()
        r.add_format(f)
        with self.assertRaises(DuplicateRegistrationError):
            r.add_format(Format('Example'))
+
+
 class RegistryTest(unittest.TestCase):
     def setUp(self):
-        # A fresh module needs to be imported for each test because the
-        # registry stores its state in the module which is by default
-        # only loaded once.
-        self.module = import_fresh_module('skbio.io._registry')
+        self.registry = IORegistry()
         self.fd1, self.fp1 = mkstemp()
         self.fd2, self.fp2 = mkstemp()
 
@@ -65,79 +72,90 @@ class RegistryTest(unittest.TestCase):
 
 class TestRegisterAndGetReader(RegistryTest):
     def test_get_reader_no_match(self):
-        self.assertEqual(None, self.module.get_reader('not_a_format',
-                                                      TestClass))
-
-    def test_register_reader_on_generator(self):
-        @self.module.register_reader('format1')
-        def format1_reader_generator(fh):
-            yield
-
-        self.assertEqual(format1_reader_generator,
-                         self.module.get_reader('format1'))
-
-        self.assertEqual(format1_reader_generator,
-                         self.module.get_reader('format1', None))
-
-        @self.module.register_reader('format2', None)
-        def format2_reader_generator(fh):
-            yield
-
-        self.assertEqual(format2_reader_generator,
-                         self.module.get_reader('format2'))
-
-        self.assertEqual(format2_reader_generator,
-                         self.module.get_reader('format2', None))
+        self.assertIs(None, self.registry.get_reader('not_a_format',
+                                                     TestClass))
 
     def test_get_reader_when_only_writer_exists(self):
-        @self.module.register_writer('format', TestClass)
-        def format_reader(fh):
+        format1 = self.registry.create_format('format1')
+
+        @format1.writer(TestClass)
+        def format_writer(fh):
             return
 
-        self.assertEqual(None, self.module.get_reader('format', TestClass))
+        self.assertEqual(None, self.registry.get_reader('format', TestClass))
 
     def test_register_reader_on_many(self):
-        @self.module.register_reader('format1', TestClassA)
+        format1 = self.registry.create_format('format1')
+        format2 = self.registry.create_format('format2')
+        format3 = self.registry.create_format('format3')
+        format4 = self.registry.create_format('format4', encoding='binary')
+        format5 = self.registry.create_format('format5', encoding='binary')
+
+        @format1.reader(TestClassA)
         def format1_reader(fh):
             return
 
-        @self.module.register_reader('format1', TestClassB)
+        @format1.reader(TestClassB)
         def format1_reader_b(fh):
             return
 
-        @self.module.register_reader('format2', TestClassA)
+        @format2.reader(TestClassA)
         def format2_reader(fh):
             return
 
-        @self.module.register_reader('format3', TestClassB)
+        @format3.reader(TestClassB)
         def format3_reader(fh):
             return
 
-        self.assertEqual(format1_reader,
-                         self.module.get_reader('format1', TestClassA))
+        @format4.reader(TestClassA)
+        def format4_reader(fh):
+            return
 
-        self.assertEqual(format1_reader_b,
-                         self.module.get_reader('format1', TestClassB))
+        @format4.reader(TestClassB)
+        def format4_reader_b(fh):
+            return
 
-        self.assertEqual(format2_reader,
-                         self.module.get_reader('format2', TestClassA))
+        @format5.reader(None)
+        def format5_reader(fh):
+            return
 
-        self.assertEqual(None,
-                         self.module.get_reader('format2', TestClassB))
+        self.assertIs(format1_reader,
+                      self.registry.get_reader('format1', TestClassA))
 
-        self.assertEqual(None,
-                         self.module.get_reader('format3', TestClassA))
+        self.assertIs(format1_reader_b,
+                      self.registry.get_reader('format1', TestClassB))
+
+        self.assertIs(format2_reader,
+                      self.registry.get_reader('format2', TestClassA))
+
+        self.assertIs(None, self.registry.get_reader('format2', TestClassB))
+
+        self.assertIs(None, self.registry.get_reader('format3', TestClassA))
+
+        self.assertIs(format3_reader,
+                      self.registry.get_reader('format3', TestClassB))
+
+        self.assertIs(format4_reader,
+                      self.registry.get_reader('format4', TestClassA))
+
+        self.assertIs(format4_reader_b,
+                      self.registry.get_reader('format4', TestClassB))
 
-        self.assertEqual(format3_reader,
-                         self.module.get_reader('format3', TestClassB))
+        self.assertIs(format5_reader,
+                      self.registry.get_reader('format5', None))
+
+        self.assertIs(None, self.registry.get_reader('format5', TestClassA))
+
+        self.assertIs(None, self.registry.get_reader('format5', TestClassB))
 
     def test_register_reader_over_existing(self):
+        format1 = self.registry.create_format('format1')
         with self.assertRaises(DuplicateRegistrationError) as cm:
-            @self.module.register_reader('format1', TestClassA)
+            @format1.reader(TestClassA)
             def format1_reader(fh):
                 return
 
-            @self.module.register_reader('format1', TestClassA)
+            @format1.reader(TestClassA)
             def duplicate_format1_reader(fh):
                 return
 
@@ -145,92 +163,120 @@ class TestRegisterAndGetReader(RegistryTest):
         self.assertTrue('reader' in str(cm.exception))
         self.assertTrue(TestClassA.__name__ in str(cm.exception))
 
-    def test_register_reader_generator_with_not_a_generator(self):
-        @self.module.register_reader('format')
-        def not_a_generator(fp):
-            return 'oops'
-
-        fh = StringIO()
-        with self.assertRaises(InvalidRegistrationError):
-            next(self.module.get_reader('format')(fh))
-        fh.close()
+    def test_register_reader_over_existing_override(self):
+        format1 = self.registry.create_format('format1')
 
+        @format1.reader(TestClassA)
+        def format1_reader(fh):
+            return
 
-class TestRegisterAndGetWriter(RegistryTest):
-    def test_get_writer_no_match(self):
-        self.assertEqual(None, self.module.get_writer('not_a_format',
-                                                      TestClass))
+        self.assertIs(format1_reader,
+                      self.registry.get_reader('format1', TestClassA))
 
-    def test_get_writer_when_only_reader_exists(self):
-        @self.module.register_reader('format', TestClass)
-        def format_reader(fh):
+        @format1.reader(TestClassA, override=True)
+        def duplicate_format1_reader(fh):
             return
 
-        self.assertEqual(None, self.module.get_writer('format', TestClass))
+        self.assertIs(duplicate_format1_reader,
+                      self.registry.get_reader('format1', TestClassA))
 
-    def test_register_writer_on_generator(self):
-        @self.module.register_writer('format1')
-        def format1_writer_generator(obj, fh):
-            yield
+    def test_mistype_reader_registration(self):
+        format1 = self.registry.create_format('format1')
 
-        self.assertEqual(format1_writer_generator,
-                         self.module.get_writer('format1'))
+        with self.assertRaises(InvalidRegistrationError):
+            @format1.reader
+            def left_out_parens(fh):
+                return
 
-        self.assertEqual(format1_writer_generator,
-                         self.module.get_writer('format1', None))
 
-        @self.module.register_writer('format2', None)
-        def format2_writer_generator(obj, fh):
-            yield
+class TestRegisterAndGetWriter(RegistryTest):
+    def test_get_writer_no_match(self):
+        self.assertEqual(None, self.registry.get_writer('not_a_format',
+                                                        TestClass))
+
+    def test_get_writer_when_only_reader_exists(self):
+        format = self.registry.create_format('format')
 
-        self.assertEqual(format2_writer_generator,
-                         self.module.get_writer('format2'))
+        @format.reader(TestClass)
+        def format_reader(fh):
+            return
 
-        self.assertEqual(format2_writer_generator,
-                         self.module.get_writer('format2', None))
+        self.assertEqual(None, self.registry.get_writer('format', TestClass))
 
     def test_register_writer_on_many(self):
-        @self.module.register_writer('format1', TestClassA)
+        format1 = self.registry.create_format('format1')
+        format2 = self.registry.create_format('format2')
+        format3 = self.registry.create_format('format3')
+        format4 = self.registry.create_format('format4', encoding='binary')
+        format5 = self.registry.create_format('format5', encoding='binary')
+
+        @format1.writer(TestClassA)
         def format1_writer(obj, fh):
             return
 
-        @self.module.register_writer('format1', TestClassB)
+        @format1.writer(TestClassB)
         def format1_writer_b(obj, fh):
             return
 
-        @self.module.register_writer('format2', TestClassA)
+        @format2.writer(TestClassA)
         def format2_writer(obj, fh):
             return
 
-        @self.module.register_writer('format3', TestClassB)
+        @format3.writer(TestClassB)
         def format3_writer(obj, fh):
             return
 
+        @format4.writer(TestClassA)
+        def format4_writer(fh):
+            return
+
+        @format4.writer(TestClassB)
+        def format4_writer_b(fh):
+            return
+
+        @format5.writer(None)
+        def format5_writer(fh):
+            return
+
         self.assertEqual(format1_writer,
-                         self.module.get_writer('format1', TestClassA))
+                         self.registry.get_writer('format1', TestClassA))
 
         self.assertEqual(format1_writer_b,
-                         self.module.get_writer('format1', TestClassB))
+                         self.registry.get_writer('format1', TestClassB))
 
         self.assertEqual(format2_writer,
-                         self.module.get_writer('format2', TestClassA))
+                         self.registry.get_writer('format2', TestClassA))
 
         self.assertEqual(None,
-                         self.module.get_writer('format2', TestClassB))
+                         self.registry.get_writer('format2', TestClassB))
 
         self.assertEqual(None,
-                         self.module.get_writer('format3', TestClassA))
+                         self.registry.get_writer('format3', TestClassA))
 
         self.assertEqual(format3_writer,
-                         self.module.get_writer('format3', TestClassB))
+                         self.registry.get_writer('format3', TestClassB))
+
+        self.assertIs(format4_writer,
+                      self.registry.get_writer('format4', TestClassA))
+
+        self.assertIs(format4_writer_b,
+                      self.registry.get_writer('format4', TestClassB))
+
+        self.assertIs(format5_writer,
+                      self.registry.get_writer('format5', None))
+
+        self.assertIs(None, self.registry.get_writer('format5', TestClassA))
+
+        self.assertIs(None, self.registry.get_writer('format5', TestClassB))
 
     def test_register_writer_over_existing(self):
+        format1 = self.registry.create_format('format1')
         with self.assertRaises(DuplicateRegistrationError) as cm:
-            @self.module.register_writer('format1', TestClassA)
+            @format1.writer(TestClassA)
             def format1_writer(obj, fh):
                 return
 
-            @self.module.register_writer('format1', TestClassA)
+            @format1.writer(TestClassA)
             def duplicate_format1_writer(obj, fh):
                 return
 
@@ -238,66 +284,101 @@ class TestRegisterAndGetWriter(RegistryTest):
         self.assertTrue('writer' in str(cm.exception))
         self.assertTrue(TestClassA.__name__ in str(cm.exception))
 
-    def test_register_writer_over_existing_generator(self):
-        with self.assertRaises(DuplicateRegistrationError) as cm:
-            @self.module.register_writer('format1')
-            def format1_writer(obj, fh):
-                return
+    def test_register_writer_over_existing_override(self):
+        format1 = self.registry.create_format('format1')
 
-            @self.module.register_writer('format1')
-            def duplicate_format1_writer(obj, fh):
-                return
+        @format1.writer(TestClassA)
+        def format1_writer(obj, fh):
+            return
 
-        self.assertTrue('format1' in str(cm.exception))
-        self.assertTrue('writer' in str(cm.exception))
-        self.assertTrue('generator' in str(cm.exception))
+        self.assertIs(format1_writer,
+                      self.registry.get_writer('format1', TestClassA))
+
+        @format1.writer(TestClassA, override=True)
+        def duplicate_format1_writer(obj, fh):
+            return
+
+        self.assertIs(duplicate_format1_writer,
+                      self.registry.get_writer('format1', TestClassA))
+
+    def test_mistype_writer_registration(self):
+        format1 = self.registry.create_format('format1')
+
+        with self.assertRaises(InvalidRegistrationError):
+            @format1.writer
+            def left_out_parens(fh):
+                return
 
 
 class TestRegisterAndGetSniffer(RegistryTest):
     def test_get_sniffer_no_match(self):
-        self.assertEqual(None, self.module.get_sniffer('not_a_format'))
+        self.assertEqual(None, self.registry.get_sniffer('not_a_format'))
 
     def test_register_sniffer_on_many(self):
-        @self.module.register_sniffer('format1')
+        format1 = self.registry.create_format('format1')
+        format2 = self.registry.create_format('format2')
+        format3 = self.registry.create_format('format3', encoding='binary')
+
+        @format1.sniffer()
         def format1_sniffer(fh):
             return '1' in fh.readline(), {}
 
-        @self.module.register_sniffer('format2')
+        @format2.sniffer()
         def format2_sniffer(fh):
             return '2' in fh.readline(), {}
 
-        @self.module.register_sniffer('format3')
+        @format3.sniffer()
         def format3_sniffer(fh):
             return '3' in fh.readline(), {}
 
         self.assertEqual(format1_sniffer,
-                         self.module.get_sniffer('format1'))
+                         self.registry.get_sniffer('format1'))
 
         self.assertEqual(format2_sniffer,
-                         self.module.get_sniffer('format2'))
+                         self.registry.get_sniffer('format2'))
 
         self.assertEqual(format3_sniffer,
-                         self.module.get_sniffer('format3'))
+                         self.registry.get_sniffer('format3'))
 
     def test_register_sniffer_over_existing(self):
+        format1 = self.registry.create_format('format1')
+
         with self.assertRaises(DuplicateRegistrationError) as cm:
-            @self.module.register_sniffer('format1')
+            @format1.sniffer()
             def format1_sniffer(fh):
                 return False, {}
 
-            @self.module.register_sniffer('format1')
+            @format1.sniffer()
             def duplicate_format1_sniffer(fh):
                 return False, {}
 
         self.assertTrue('format1' in str(cm.exception))
 
+    def test_register_sniffer_over_existing_override(self):
+        format1 = self.registry.create_format('format1')
+
+        @format1.sniffer()
+        def format1_sniffer(fh):
+            return False, {}
+
+        self.assertIs(self.registry.get_sniffer('format1'), format1_sniffer)
+
+        @format1.sniffer(override=True)
+        def duplicate_format1_sniffer(fh):
+            return False, {}
+
+        self.assertIs(self.registry.get_sniffer('format1'),
+                      duplicate_format1_sniffer)
+
     def test_sniffer_warns_on_exception(self):
-        @self.module.register_sniffer('format')
+        format = self.registry.create_format('format')
+
+        @format.sniffer()
         def format_sniffer(fh):
             raise TestingUtilError("Sniffer will return False and warn.")
 
         fh = StringIO()
-        sniffer = self.module.get_sniffer('format')
+        sniffer = self.registry.get_sniffer('format')
         with warnings.catch_warnings(record=True):
             warnings.simplefilter("error")
             with self.assertRaises(FormatIdentificationWarning):
@@ -311,48 +392,67 @@ class TestRegisterAndGetSniffer(RegistryTest):
 
         fh.close()
 
+    def test_mistype_sniffer_registration(self):
+        format1 = self.registry.create_format('format1')
+
+        with self.assertRaises(InvalidRegistrationError):
+            @format1.sniffer
+            def left_out_parens(fh):
+                return
+
 
 class TestListReadFormats(RegistryTest):
     def test_no_read_formats(self):
-        @self.module.register_reader('format1', TestClassA)
+        format1 = self.registry.create_format('format1')
+
+        @format1.reader(TestClassA)
         def this_isnt_on_clsB(fh):
             return
 
-        self.assertEqual([], self.module.list_read_formats(TestClassB))
+        self.assertEqual([], self.registry.list_read_formats(TestClassB))
 
     def test_one_read_format(self):
-        @self.module.register_reader('format1', TestClass)
+        format1 = self.registry.create_format('format1')
+
+        @format1.reader(TestClass)
         def format1_cls(fh):
             return
 
-        self.assertEqual(['format1'], self.module.list_read_formats(TestClass))
+        self.assertEqual(['format1'],
+                         self.registry.list_read_formats(TestClass))
 
     def test_many_read_formats(self):
-        @self.module.register_reader('format1', TestClassA)
+        format1 = self.registry.create_format('format1')
+        format2 = self.registry.create_format('format2')
+        format3 = self.registry.create_format('format3', encoding='binary')
+        format4 = self.registry.create_format('format4')
+        format5 = self.registry.create_format('format5', encoding='binary')
+
+        @format1.reader(TestClassA)
         def format1_clsA(fh):
             return
 
-        @self.module.register_reader('format2', TestClassA)
+        @format2.reader(TestClassA)
         def format2_clsA(fh):
             return
 
-        @self.module.register_reader('format3', TestClassA)
+        @format3.reader(TestClassA)
         def format3_clsA(fh):
             return
 
-        @self.module.register_reader('format3', TestClassB)
+        @format3.reader(TestClassB)
         def format3_clsB(fh):
             return
 
-        @self.module.register_reader('format4', TestClassB)
+        @format4.reader(TestClassB)
         def format4_clsB(fh):
             return
 
-        @self.module.register_writer('format5', TestClassA)
+        @format5.writer(TestClassA)
         def format5_clsA(fh):
             return
 
-        formats = self.module.list_read_formats(TestClassA)
+        formats = self.registry.list_read_formats(TestClassA)
         self.assertTrue('format1' in formats)
         self.assertTrue('format2' in formats)
         self.assertTrue('format3' in formats)
@@ -362,46 +462,56 @@ class TestListReadFormats(RegistryTest):
 
 class TestListWriteFormats(RegistryTest):
     def test_no_write_formats(self):
-        @self.module.register_writer('format1', TestClassA)
+        format1 = self.registry.create_format('format1')
+
+        @format1.writer(TestClassA)
         def this_isnt_on_clsB(fh):
             return
 
-        self.assertEqual([], self.module.list_write_formats(TestClassB))
+        self.assertEqual([], self.registry.list_write_formats(TestClassB))
 
     def test_one_write_format(self):
-        @self.module.register_writer('format1', TestClass)
+        format1 = self.registry.create_format('format1')
+
+        @format1.writer(TestClass)
         def format1_cls(fh):
             return
 
         self.assertEqual(['format1'],
-                         self.module.list_write_formats(TestClass))
+                         self.registry.list_write_formats(TestClass))
 
     def test_many_write_formats(self):
-        @self.module.register_writer('format1', TestClassA)
+        format1 = self.registry.create_format('format1')
+        format2 = self.registry.create_format('format2')
+        format3 = self.registry.create_format('format3', encoding='binary')
+        format4 = self.registry.create_format('format4')
+        format5 = self.registry.create_format('format5', encoding='binary')
+
+        @format1.writer(TestClassA)
         def format1_clsA(fh):
             return
 
-        @self.module.register_writer('format2', TestClassA)
+        @format2.writer(TestClassA)
         def format2_clsA(fh):
             return
 
-        @self.module.register_writer('format3', TestClassA)
+        @format3.writer(TestClassA)
         def format3_clsA(fh):
             return
 
-        @self.module.register_writer('format3', TestClassB)
+        @format3.writer(TestClassB)
         def format3_clsB(fh):
             return
 
-        @self.module.register_writer('format4', TestClassB)
+        @format4.writer(TestClassB)
         def format4_clsB(fh):
             return
 
-        @self.module.register_reader('format5', TestClassA)
+        @format5.reader(TestClassA)
         def format5_clsA(fh):
             return
 
-        formats = self.module.list_write_formats(TestClassA)
+        formats = self.registry.list_write_formats(TestClassA)
 
         self.assertTrue('format1' in formats)
         self.assertTrue('format2' in formats)
@@ -413,151 +523,375 @@ class TestListWriteFormats(RegistryTest):
 class TestSniff(RegistryTest):
     def setUp(self):
         super(TestSniff, self).setUp()
-
-        @self.module.register_sniffer('format1')
+        format1 = self.registry.create_format('format1')
+        format2 = self.registry.create_format('format2')
+        format3 = self.registry.create_format('format3')
+        format4 = self.registry.create_format('format4')
+        # No sniffer for this format:
+        self.registry.create_format('format5')
+
+        @format1.sniffer()
         def format1_sniffer(fh):
             return '1' in fh.readline(), {}
 
-        @self.module.register_sniffer('format2')
+        @format2.sniffer()
         def format2_sniffer(fh):
             return '2' in fh.readline(), {}
 
-        @self.module.register_sniffer('format3')
+        @format3.sniffer()
         def format3_sniffer(fh):
             return '3' in fh.readline(), {}
 
-        @self.module.register_sniffer('format4')
+        @format4.sniffer()
         def format4_sniffer(fh):
             return '4' in fh.readline(), {}
 
-        @self.module.register_reader('format3', TestClass)
+        @format3.reader(TestClass)
         def reader3(fh):
             return
 
-        @self.module.register_reader('format4', TestClass)
+        @format4.reader(TestClass)
         def reader4(fh):
             return
 
     def test_no_matches(self):
         fh = StringIO(u"no matches here")
         with self.assertRaises(UnrecognizedFormatError) as cm:
-            self.module.sniff(fh)
+            self.registry.sniff(fh)
         self.assertTrue(str(fh) in str(cm.exception))
 
-        with self.assertRaises(UnrecognizedFormatError) as cm:
-            self.module.sniff(fh, cls=TestClass)
-
-        with self.assertRaises(UnrecognizedFormatError) as cm:
-            self.module.sniff(fh, cls=TestClassB)
-
         fh.close()
 
     def test_one_match(self):
         fh = StringIO(u"contains a 3")
-        self.assertEqual('format3', self.module.sniff(fh)[0])
+        self.assertEqual('format3', self.registry.sniff(fh)[0])
 
     def test_many_matches(self):
         fh = StringIO(u"1234 will match all")
         with self.assertRaises(UnrecognizedFormatError) as cm:
-            self.module.sniff(fh)
+            self.registry.sniff(fh)
         self.assertTrue("format1" in str(cm.exception))
         self.assertTrue("format2" in str(cm.exception))
         self.assertTrue("format3" in str(cm.exception))
         self.assertTrue("format4" in str(cm.exception))
         fh.close()
 
-    def test_no_matches_w_cls(self):
-        fh = StringIO(u"no matches here")
-        with self.assertRaises(UnrecognizedFormatError) as cm:
-            self.module.sniff(fh, cls=TestClass)
-        self.assertTrue(str(fh) in str(cm.exception))
-        fh.close()
+    def test_that_encoding_is_used(self):
+        formatx = self.registry.create_format('formatx')
 
-    def test_one_match_w_cls(self):
-        fh = StringIO(u"contains a 3")
-        self.assertEqual('format3',
-                         self.module.sniff(fh, cls=TestClass)[0])
+        fp = get_data_path('big5_file')
 
-    def test_many_matches_w_cls(self):
-        fh = StringIO(u"1234 will only format3 and format4 w/ class")
-        with self.assertRaises(UnrecognizedFormatError) as cm:
-            self.module.sniff(fh, cls=TestClass)
-        self.assertTrue("format1" not in str(cm.exception))
-        self.assertTrue("format2" not in str(cm.exception))
-        # Only format3 and format4 have a definition for the provided class.
-        self.assertTrue("format3" in str(cm.exception))
-        self.assertTrue("format4" in str(cm.exception))
-        fh.close()
+        @formatx.sniffer()
+        def sniffer(fh):
+            self.assertEqual('big5', fh.encoding)
+            return True, {}
 
-    def test_that_mode_is_used(self):
-        fp = self.fp1
-        with open(fp, 'w') as fh:
-            fh.write('@\n#\n')
+        fmt, _ = self.registry.sniff(fp, encoding='big5')
+        self.assertEqual(fmt, 'formatx')
+
+    def test_that_newline_is_used(self):
+        formatx = self.registry.create_format('formatx')
+
+        fp = get_data_path('real_file')
+
+        @formatx.sniffer()
+        def sniffer(fh):
+            self.assertEqual(fh.readlines(), ['a\nb\nc\nd\ne\n'])
+            return True, {}
+
+        fmt, _ = self.registry.sniff(fp, newline='\r')
+        self.assertEqual(fmt, 'formatx')
+
+    def test_non_default_encoding(self):
+        big5_format = self.registry.create_format('big5_format',
+                                                  encoding='big5')
+
+        @big5_format.sniffer()
+        def sniffer(fh):
+            self.assertEqual(self._expected_encoding, fh.encoding)
+            return True, {}
+
+        self._expected_encoding = 'big5'
+        fmt, _ = self.registry.sniff(self.fp1)
+        self.assertEqual(fmt, 'big5_format')
+
+        self._expected_encoding = 'UTF-8'
+        fmt, _ = self.registry.sniff(self.fp1, encoding='UTF-8')
+        self.assertEqual(fmt, 'big5_format')
+
+    def test_non_default_newline(self):
+        formatx = self.registry.create_format('formatx', newline='\r')
 
-        @self.module.register_sniffer('format')
+        fp = get_data_path('real_file')
+
+        @formatx.sniffer()
         def sniffer(fh):
-            self.assertEqual(self.expected_mode, fh.mode)
-            return '@' in fh.readline(), {}
+            self.assertEqual(fh.readlines(), self._expected_lines)
+            return True, {}
 
-        self.expected_mode = 'U'
-        self.module.sniff(fp)
+        self._expected_lines = ['a\nb\nc\nd\ne\n']
+        fmt, _ = self.registry.sniff(fp)
+        self.assertEqual(fmt, 'formatx')
 
-        self.expected_mode = 'r'
-        self.module.sniff(fp, mode='r')
+        self._expected_lines = ['a\n', 'b\n', 'c\n', 'd\n', 'e\n']
+        fmt, _ = self.registry.sniff(fp, newline=None)
+        self.assertEqual(fmt, 'formatx')
 
     def test_position_not_mutated_real_file(self):
-        @self.module.register_sniffer('format')
+        formatx = self.registry.create_format('formatx')
+
+        @formatx.sniffer()
         def sniffer(fh):
             return True, {}
 
-        with open(get_data_path('real_file')) as fh:
+        with io.open(get_data_path('real_file')) as fh:
             fh.seek(2)
-            self.module.sniff(fh)
-            self.assertEqual('b\n', next(fh))
+            self.registry.sniff(fh)
+            self.assertEqual(fh.tell(), 2)
+            self.assertEqual('b\n', fh.readline())
 
     def test_position_not_mutated_fileish(self):
-        @self.module.register_sniffer('format')
+        formatx = self.registry.create_format('formatx')
+
+        @formatx.sniffer()
         def sniffer(fh):
             return True, {}
 
         fh = StringIO(u'a\nb\nc\nd\n')
         fh.seek(2)
-        self.module.sniff(fh)
-        self.assertEqual('b\n', next(fh))
+        self.registry.sniff(fh)
+        self.assertEqual('b\n', fh.readline())
+
+    def test_sniff_with_errors_in_sniffer(self):
+        formatx = self.registry.create_format('formatx', encoding='ascii')
+
+        @formatx.sniffer()
+        def sniffer(fh):
+            raise Exception("OH NO!")
+
+        fp = get_data_path('big5_file')
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter('error')
+            with self.assertRaises(FormatIdentificationWarning):
+                fmt, _ = self.registry.sniff(fp)
+
+    def test_sniff_with_encoding_errors(self):
+        formatx = self.registry.create_format('formatx', encoding='ascii')
+
+        @formatx.sniffer()
+        def sniffer(fh):
+            fh.read()
+            return True, {}
+
+        fp = get_data_path('big5_file')
+        with self.assertRaises(UnrecognizedFormatError):
+            fmt, _ = self.registry.sniff(fp, errors='strict')
+        # errors is set to ignore by default, so our sniffer will return
+        # true even though read() didn't entirely work for ascii
+        fmt, _ = self.registry.sniff(fp)
+        self.assertEqual(fmt, 'formatx')
+
+    def test_binary_sniffer(self):
+        binf = self.registry.create_format('binf', encoding='binary')
+
+        @binf.sniffer()
+        def sniffer(fh):
+            self.assertIsInstance(fh, (io.BufferedReader, io.BufferedRandom))
+            return True, {}
+
+        fmt, _ = self.registry.sniff(self.fp1)
+        self.assertEqual(fmt, 'binf')
+
+    def test_text_sniffer(self):
+        textf = self.registry.create_format('textf', encoding=None)
+
+        @textf.sniffer()
+        def sniffer(fh):
+            self.assertIsInstance(fh, io.TextIOBase)
+            return True, {}
+
+        fmt, _ = self.registry.sniff(self.fp1)
+        self.assertEqual(fmt, 'textf')
+
+    def test_sniff_with_illegal_encoding(self):
+        binf = self.registry.create_format('binf', encoding='binary')
+        textf = self.registry.create_format('textf', encoding=None)
+
+        @binf.sniffer()
+        def binf_sniffer(fh):
+            return True, {}
+
+        @textf.sniffer()
+        def textf_sniffer(fh):
+            return True, {}
+
+        # Should skip binary sniffers
+        fmt, _ = self.registry.sniff(self.fp1, encoding=None)
+        self.assertEqual(fmt, 'textf')
+        # Should skip text sniffers
+        fmt, _ = self.registry.sniff(self.fp1, encoding='binary')
+        self.assertEqual(fmt, 'binf')
+
+        with self.assertRaises(ValueError):
+            self.registry.sniff([u'some content\n'], encoding='binary')
+
+        with self.assertRaises(ValueError):
+            binf_sniffer(self.fp1, encoding=None)
+
+        with self.assertRaises(ValueError):
+            textf_sniffer(self.fp1, encoding='binary')
+
+    def test_binary_fall_through(self):
+        binf = self.registry.create_format('binf', encoding='binary')
+        textf = self.registry.create_format('textf', encoding=None)
+
+        @binf.sniffer()
+        def binf_sniffer(fh):
+            self._check_binf = True
+            return False, {}
+
+        @textf.sniffer()
+        def textf_sniffer(fh):
+            self._check_textf = True
+            return True, {}
+
+        self._check_binf = False
+        self._check_textf = False
+
+        fmt, _ = self.registry.sniff(self.fp1)
+        self.assertEqual(fmt, 'textf')
+
+        self.assertTrue(self._check_binf)
+        self.assertTrue(self._check_textf)
+
+    def test_sniff_gzip(self):
+        expected = u"This is some content\nIt occurs on more than one line\n"
+
+        formata = self.registry.create_format('formata', encoding='binary')
+        formatb = self.registry.create_format('formatb')
+        formatc = self.registry.create_format('formatc')
+
+        @formata.sniffer()
+        def formata_sniffer(fh):
+            self._check_f1 = True
+            self.assertEqual(fh.read(), expected.encode('ascii'))
+            return False, {}
+
+        @formatb.sniffer()
+        def formatb_sniffer(fh):
+            self._check_f2 = True
+            self.assertEqual(fh.read(), expected)
+            return True, {}
+
+        @formatc.sniffer()
+        def formatc_sniffer(fh):
+            self._check_f3 = True
+            self.assertEqual(fh.read(), expected)
+            return False, {}
+
+        self._check_f1 = False
+        self._check_f2 = False
+        self._check_f3 = False
+        self.registry.sniff(get_data_path('example_file.gz'))
+        self.assertTrue(self._check_f1)
+        self.assertTrue(self._check_f2)
+        self.assertTrue(self._check_f3)
+
+    def test_text_skip_binary(self):
+        binf = self.registry.create_format('binf', encoding='binary')
+        textf = self.registry.create_format('textf', encoding=None)
+
+        @binf.sniffer()
+        def binf_sniffer(fh):
+            self._check_binf = True
+            return True, {}
+
+        @textf.sniffer()
+        def textf_sniffer(fh):
+            self._check_textf = True
+            return True, {}
+
+        self._check_binf = False
+        self._check_textf = False
+
+        fmt, _ = self.registry.sniff([u'text'])
+        self.assertEqual(fmt, 'textf')
+
+        self.assertFalse(self._check_binf)
+        self.assertTrue(self._check_textf)
+
+        self._check_binf = False
+        self._check_textf = False
+
+        fmt, _ = self.registry.sniff(self.fp1, encoding=None)
+        self.assertEqual(fmt, 'textf')
+
+        self.assertFalse(self._check_binf)
+        self.assertTrue(self._check_textf)
+
+    def test_text_skip_text(self):
+        binf = self.registry.create_format('binf', encoding='binary')
+        textf = self.registry.create_format('textf', encoding=None)
+
+        @binf.sniffer()
+        def binf_sniffer(fh):
+            self._check_binf = True
+            return True, {}
+
+        @textf.sniffer()
+        def textf_sniffer(fh):
+            self._check_textf = True
+            return True, {}
+
+        self._check_binf = False
+        self._check_textf = False
+
+        fmt, _ = self.registry.sniff(self.fp1, encoding='binary')
+        self.assertEqual(fmt, 'binf')
+
+        self.assertTrue(self._check_binf)
+        self.assertFalse(self._check_textf)
 
 
 class TestRead(RegistryTest):
     def test_format_and_into_are_none(self):
         fh = StringIO()
         with self.assertRaises(ValueError):
-            self.module.read(fh)
+            self.registry.read(fh)
 
         fh.close()
 
     def test_format_is_none(self):
+        format1 = self.registry.create_format('format1')
+
         fh = StringIO(u'1\n2\n3\n4')
 
-        @self.module.register_sniffer('format')
+        @format1.sniffer()
         def sniffer(fh):
             return '1' in fh.readline(), {}
 
-        @self.module.register_reader('format', TestClass)
+        @format1.reader(TestClass)
         def reader(fh):
+            self.assertIsInstance(fh, io.TextIOBase)
             return TestClass([int(x) for x in fh.read().split('\n')])
 
-        instance = self.module.read(fh, into=TestClass)
+        instance = self.registry.read(fh, into=TestClass)
         self.assertEqual(TestClass([1, 2, 3, 4]), instance)
         fh.close()
 
     def test_into_is_none(self):
+        format1 = self.registry.create_format('format1')
+
         fh = StringIO(u'1\n2\n3\n4')
 
-        @self.module.register_reader('format')
+        @format1.reader(None)
         def reader(fh):
+            self.assertIsInstance(fh, io.TextIOBase)
             for value in [int(x) for x in fh.read().split('\n')]:
                 yield value
 
-        generator = self.module.read(fh, format='format')
+        generator = self.registry.read(fh, format='format1')
+        self.assertIsInstance(generator, types.GeneratorType)
         first_run = True
         for a, b in zip(generator, [1, 2, 3, 4]):
             if first_run:
@@ -568,85 +902,56 @@ class TestRead(RegistryTest):
         fh.close()
 
     def test_into_is_none_real_file(self):
+        format1 = self.registry.create_format('format1')
+
         fp = self.fp1
         with open(fp, 'w') as fh:
             fh.write('1\n2\n3\n4')
 
         self._test_fh = None
 
-        @self.module.register_reader('format')
+        @format1.reader(None)
         def reader(fh):
             self._test_fh = fh
             for value in [int(x) for x in fh.read().split('\n')]:
                 yield value
 
-        generator = self.module.read(fp, format='format')
-        for a, b in zip(generator, [1, 2, 3, 4]):
+        generator = self.registry.read(fp, format='format1')
+        for a, b in zip_longest(generator, [1, 2, 3, 4]):
             self.assertEqual(a, b)
         self.assertTrue(self._test_fh.closed)
 
     def test_reader_does_not_exist(self):
+        fh = StringIO()
         with self.assertRaises(UnrecognizedFormatError) as cm:
-            self.module.read(None, format='not_a_format', into=TestClass)
+            self.registry.read(fh, format='not_a_format', into=TestClass)
 
         self.assertTrue(TestClass.__name__ in str(cm.exception))
         self.assertTrue('not_a_format' in str(cm.exception))
 
         with self.assertRaises(UnrecognizedFormatError) as cm:
-            self.module.read(None, format='not_a_format2')
+            self.registry.read(fh, format='not_a_format2')
 
         self.assertTrue('generator' in str(cm.exception))
         self.assertTrue('not_a_format2' in str(cm.exception))
 
-    def test_reader_is_not_generator(self):
-        fh = StringIO(u'1\n2\n3\n4')
-
-        @self.module.register_sniffer('format')
-        def sniffer(fh):
-            return '1' in fh.readline(), {}
-
-        @self.module.register_reader('format')
-        def reader(fh):
-            # Not a generator!
-            return TestClass([int(x) for x in fh.read().split('\n')])
-
-        with self.assertRaises(InvalidRegistrationError):
-            next(self.module.read(fh, format='format'))
-
-        fh.close()
-
-    def test_reader_empty_file(self):
-        fh = StringIO()
-
-        @self.module.register_sniffer('format')
-        def sniffer(fh):
-            return False, {}
-
-        @self.module.register_reader('format', TestClass)
-        def reader(fh):
-            return
-
-        with self.assertRaises(UnrecognizedFormatError) as cm:
-            self.module.read(fh, into=TestClass)
-        self.assertIn('<emptyfile>', str(cm.exception))
-
-        fh.close()
-
     def test_reader_exists_with_verify_true(self):
+        format1 = self.registry.create_format('format1')
+
         fh = StringIO(u'1\n2\n3\n4')
 
-        @self.module.register_sniffer('format')
+        @format1.sniffer()
         def sniffer(fh):
             self.was_verified = True
             return '1' in fh.readline(), {}
 
-        @self.module.register_reader('format', TestClass)
+        @format1.reader(TestClass)
         def reader(fh):
             return TestClass([int(x) for x in fh.read().split('\n')])
 
         self.was_verified = False
-        instance = self.module.read(fh, format='format', into=TestClass,
-                                    verify=True)
+        instance = self.registry.read(fh, format='format1', into=TestClass,
+                                      verify=True)
         self.assertEqual(TestClass([1, 2, 3, 4]), instance)
         self.assertTrue(self.was_verified)
 
@@ -654,21 +959,23 @@ class TestRead(RegistryTest):
         fh.seek(0)
 
         self.was_verified = False
-        instance = self.module.read(fh, format='format', into=TestClass)
+        instance = self.registry.read(fh, format='format1', into=TestClass)
         self.assertEqual(TestClass([1, 2, 3, 4]), instance)
         self.assertTrue(self.was_verified)
 
         fh.close()
 
     def test_warning_raised(self):
+        format1 = self.registry.create_format('format1')
+
         fh = StringIO(u'1\n2\n3\n4')
 
-        @self.module.register_sniffer('format')
+        @format1.sniffer()
         def sniffer(fh):
             self.was_verified = True
             return False, {}
 
-        @self.module.register_reader('format', TestClass)
+        @format1.reader(TestClass)
         def reader(fh):
             return TestClass([int(x) for x in fh.read().split('\n')])
 
@@ -676,8 +983,8 @@ class TestRead(RegistryTest):
             warnings.simplefilter("error")
             with self.assertRaises(FormatIdentificationWarning):
                 self.was_verified = False
-                instance = self.module.read(fh, format='format',
-                                            into=TestClass, verify=True)
+                instance = self.registry.read(fh, format='format1',
+                                              into=TestClass, verify=True)
                 self.assertEqual(TestClass([1, 2, 3, 4]), instance)
                 self.assertTrue(self.was_verified)
 
@@ -685,313 +992,608 @@ class TestRead(RegistryTest):
             warnings.simplefilter("error")
             with self.assertRaises(FormatIdentificationWarning):
                 self.was_verified = False
-                instance = self.module.read(fh, format='format',
-                                            into=TestClass)
+                instance = self.registry.read(fh, format='format1',
+                                              into=TestClass)
                 self.assertEqual(TestClass([1, 2, 3, 4]), instance)
                 self.assertTrue(self.was_verified)
 
         fh.close()
 
     def test_reader_exists_with_verify_false(self):
+        format1 = self.registry.create_format('format1')
+
         fh = StringIO(u'1\n2\n3\n4')
 
-        @self.module.register_sniffer('format')
+        @format1.sniffer()
         def sniffer(fh):
             self.was_verified = True
             return '1' in fh.readline(), {}
 
-        @self.module.register_reader('format', TestClass)
+        @format1.reader(TestClass)
         def reader(fh):
             return TestClass([int(x) for x in fh.read().split('\n')])
 
         self.was_verified = False
-        instance = self.module.read(fh, format='format', into=TestClass,
-                                    verify=False)
+        instance = self.registry.read(fh, format='format1', into=TestClass,
+                                      verify=False)
         self.assertEqual(TestClass([1, 2, 3, 4]), instance)
         self.assertFalse(self.was_verified)
         fh.close()
 
     def test_reader_exists_real_file(self):
+        format1 = self.registry.create_format('format1')
+
         fp = self.fp1
         with open(fp, 'w') as fh:
             fh.write('1\n2\n3\n4')
 
-        @self.module.register_sniffer('format')
+        @format1.sniffer()
         def sniffer(fh):
             return '1' in fh.readline(), {}
 
-        @self.module.register_reader('format', TestClass)
+        @format1.reader(TestClass)
         def reader(fh):
             return TestClass([int(x) for x in fh.read().split('\n')])
 
-        instance = self.module.read(fp, format='format', into=TestClass)
+        instance = self.registry.read(fp, format='format1', into=TestClass)
         self.assertEqual(TestClass([1, 2, 3, 4]), instance)
 
     def test_read_kwargs_passed_generator(self):
-        @self.module.register_sniffer('format')
+        format1 = self.registry.create_format('format1')
+
+        @format1.sniffer()
         def sniffer(fh):
             return True, {'arg1': 15, 'arg2': 'abc'}
 
-        @self.module.register_reader('format')
+        @format1.reader(None)
         def reader(fh, **kwargs):
             self.assertEqual(kwargs['arg1'], 15)
             self.assertEqual(kwargs['arg2'], 'abc')
             self.assertEqual(kwargs['arg3'], [1])
             yield
 
-        next(self.module.read(StringIO(), format='format', arg3=[1]))
+        next(self.registry.read(StringIO(), format='format1', arg3=[1]))
 
     def test_read_kwargs_passed_and_override(self):
-        @self.module.register_sniffer('format')
+        format1 = self.registry.create_format('format1')
+
+        @format1.sniffer()
         def sniffer(fh):
             return True, {'arg1': 15, 'arg2': 'abc', 'override': 30}
 
-        @self.module.register_reader('format', TestClass)
+        @format1.reader(TestClass)
         def reader(fh, **kwargs):
             self.assertEqual(kwargs['arg1'], 15)
             self.assertEqual(kwargs['arg2'], 'abc')
             self.assertEqual(kwargs['arg3'], [1])
             return
 
-        self.module.read(StringIO(u'notempty'), into=TestClass, arg3=[1])
+        self.registry.read(StringIO(u'notempty'), into=TestClass, arg3=[1])
 
         with warnings.catch_warnings(record=True):
             warnings.simplefilter("error")
             # Should raise no warning and thus no error.
-            self.module.read(StringIO(u'notempty'), into=TestClass, arg3=[1],
-                             override=30)
+            self.registry.read(StringIO(u'notempty'), into=TestClass, arg3=[1],
+                               override=30)
             # Should raise a warning and thus an error.
             with self.assertRaises(ArgumentOverrideWarning):
-                self.module.read(StringIO(u'notempty'), into=TestClass,
-                                 arg3=[1], override=100)
+                self.registry.read(StringIO(u'notempty'), into=TestClass,
+                                   arg3=[1], override=100)
 
-    def test_that_mode_is_used(self):
-        fp = self.fp1
-        with open(fp, 'w') as fh:
-            fh.write('1\n2\n3\n4')
+    def test_that_encoding_is_used(self):
+        format1 = self.registry.create_format('format1')
+
+        fp = get_data_path('big5_file')
 
-        @self.module.register_sniffer('format')
+        @format1.sniffer()
         def sniffer(fh):
-            return '1' in fh.readline(), {}
+            return u'\u4f60' in fh.readline(), {}
 
-        @self.module.register_reader('format', TestClass)
+        @format1.reader(TestClass)
         def reader(fh):
-            self.assertEqual(self.expected_mode, fh.mode)
-            return TestClass([int(x) for x in fh.read().split('\n')])
+            self.assertEqual(self._expected_enc, fh.encoding)
+            return TestClass(fh.readlines())
 
-        self.expected_mode = 'U'
-        instance = self.module.read(fp, format='format', into=TestClass)
-        self.assertEqual(TestClass([1, 2, 3, 4]), instance)
+        @format1.reader(None)
+        def reader_gen(fh):
+            self.assertEqual(self._expected_enc, fh.encoding)
+            yield TestClass(fh.readlines())
 
-        self.expected_mode = 'r'
-        instance = self.module.read(fp, format='format', into=TestClass,
-                                    mode='r')
-        self.assertEqual(TestClass([1, 2, 3, 4]), instance)
+        self._expected_enc = 'big5'
+        instance = self.registry.read(fp, into=TestClass, encoding='big5')
+        self.assertEqual(TestClass([u'\u4f60\u597d\n']), instance)
+
+        self._expected_enc = 'big5'
+        gen = self.registry.read(fp, format='format1', encoding='big5')
+        self.assertEqual(TestClass([u'\u4f60\u597d\n']), next(gen))
+
+    def test_non_default_encoding(self):
+        format1 = self.registry.create_format('format1', encoding='big5')
+
+        fp = get_data_path('big5_file')
+
+        @format1.sniffer()
+        def sniffer(fh):
+            return True, {}
+
+        @format1.reader(TestClass)
+        def reader(fh):
+            self.assertEqual(self._expected_enc, fh.encoding)
+            return TestClass(fh.readlines())
+
+        @format1.reader(None)
+        def reader_gen(fh):
+            self.assertEqual(self._expected_enc, fh.encoding)
+            yield TestClass(fh.readlines())
+
+        self._expected_enc = 'big5'
+        instance = self.registry.read(fp, into=TestClass)
+        self.assertEqual(TestClass([u'\u4f60\u597d\n']), instance)
+
+        gen = self.registry.read(fp, format='format1')
+        self.assertEqual(TestClass([u'\u4f60\u597d\n']), next(gen))
+        gen.close()
+
+        self._expected_enc = 'utf8'
+        with self.assertRaises(UnicodeDecodeError):
+            self.registry.read(fp, into=TestClass, encoding='utf8')
+
+        with self.assertRaises(UnicodeDecodeError):
+            self.registry.read(fp, format='format1', encoding='utf8')
+
+    def test_that_newline_is_used(self):
+        formatx = self.registry.create_format('formatx')
+
+        fp = get_data_path('real_file')
+
+        @formatx.sniffer()
+        def sniffer(fh):
+            return True, {}
+
+        @formatx.reader(TestClass)
+        def reader(fh):
+            return TestClass(fh.readlines())
+
+        @formatx.reader(None)
+        def reader_gen(fh):
+            yield TestClass(fh.readlines())
+
+        instance = self.registry.read(fp, into=TestClass, newline='\r')
+        self.assertEqual(instance, TestClass(['a\nb\nc\nd\ne\n']))
+
+        gen = self.registry.read(fp, format='formatx', newline='\r')
+        self.assertEqual(next(gen), TestClass(['a\nb\nc\nd\ne\n']))
+        gen.close()
+
+    def test_non_default_newline(self):
+        formatx = self.registry.create_format('formatx', newline='\r')
+
+        fp = get_data_path('real_file')
+
+        @formatx.sniffer()
+        def sniffer(fh):
+            return True, {}
+
+        @formatx.reader(TestClass)
+        def reader(fh):
+            return TestClass(fh.readlines())
+
+        @formatx.reader(None)
+        def reader_gen(fh):
+            yield TestClass(fh.readlines())
+
+        instance = self.registry.read(fp, into=TestClass)
+        self.assertEqual(instance, TestClass(['a\nb\nc\nd\ne\n']))
+
+        gen = self.registry.read(fp, format='formatx')
+        self.assertEqual(next(gen), TestClass(['a\nb\nc\nd\ne\n']))
+        gen.close()
+
+        instance = self.registry.read(fp, into=TestClass, newline=None)
+        self.assertEqual(instance, TestClass(['a\n', 'b\n', 'c\n', 'd\n',
+                                              'e\n']))
+
+        gen = self.registry.read(fp, format='formatx', newline=None)
+        self.assertEqual(next(gen), TestClass(['a\n', 'b\n', 'c\n', 'd\n',
+                                               'e\n']))
+        gen.close()
 
     def test_file_sentinel_many(self):
+        format1 = self.registry.create_format('format1')
+
         extra = get_data_path('real_file')
         extra_2 = get_data_path('real_file_2')
         fh = StringIO(u'1\n2\n3\n4')
 
-        @self.module.register_sniffer('format')
+        @format1.sniffer()
         def sniffer(fh):
             return '1' in fh.readline(), {}
 
-        @self.module.register_reader('format', TestClass)
-        def reader(fh, extra=self.module.FileSentinel, other=2,
-                   extra_2=self.module.FileSentinel):
+        @format1.reader(TestClass)
+        def reader(fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
             self.assertEqual('a\nb\nc\nd\ne\n', extra.read())
             self.assertEqual('!\n@\n#\n$\n%\nThe realest.\n', extra_2.read())
             return TestClass([int(x) for x in fh.read().split('\n')])
 
-        instance = self.module.read(fh, format='format', into=TestClass,
-                                    extra=extra, extra_2=extra_2)
+        instance = self.registry.read(fh, format='format1', into=TestClass,
+                                      extra=extra, extra_2=extra_2)
         self.assertEqual(TestClass([1, 2, 3, 4]), instance)
 
         fh.close()
 
     def test_file_sentinel_converted_to_none(self):
+        format1 = self.registry.create_format('format1')
+
         fh = StringIO(u'1\n2\n3\n4')
 
-        @self.module.register_sniffer('format')
+        @format1.sniffer()
         def sniffer(fh):
             return '1' in fh.readline(), {}
 
-        @self.module.register_reader('format', TestClass)
-        def reader(fh, extra=self.module.FileSentinel, other=2,
-                   extra_2=self.module.FileSentinel):
+        @format1.reader(TestClass)
+        def reader(fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
             self.assertIsNone(extra)
             self.assertIsNone(extra_2)
             return TestClass([int(x) for x in fh.read().split('\n')])
 
-        instance = self.module.read(fh, format='format', into=TestClass)
+        instance = self.registry.read(fh, format='format1', into=TestClass)
         self.assertEqual(TestClass([1, 2, 3, 4]), instance)
 
         fh.close()
 
     def test_file_sentinel_pass_none(self):
+        format1 = self.registry.create_format('format1')
+
         fh = StringIO(u'1\n2\n3\n4')
 
-        @self.module.register_sniffer('format')
+        @format1.sniffer()
         def sniffer(fh):
             return '1' in fh.readline(), {}
 
-        @self.module.register_reader('format', TestClass)
-        def reader(fh, extra=self.module.FileSentinel, other=2,
-                   extra_2=self.module.FileSentinel):
+        @format1.reader(TestClass)
+        def reader(fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
             self.assertIsNone(extra)
             self.assertIsNone(extra_2)
             return TestClass([int(x) for x in fh.read().split('\n')])
 
-        instance = self.module.read(fh, format='format', into=TestClass,
-                                    extra=None)
+        instance = self.registry.read(fh, format='format1', into=TestClass,
+                                      extra=None)
         self.assertEqual(TestClass([1, 2, 3, 4]), instance)
 
         fh.close()
 
     def test_file_sentinel_generator_many(self):
+        format1 = self.registry.create_format('format1')
+
         extra = get_data_path('real_file')
         extra_2 = get_data_path('real_file_2')
         fh = StringIO(u'1\n2\n3\n4')
 
-        @self.module.register_sniffer('format')
+        @format1.sniffer()
         def sniffer(fh):
             return '1' in fh.readline(), {}
 
-        @self.module.register_reader('format')
-        def reader(fh, extra=self.module.FileSentinel, other=2,
-                   extra_2=self.module.FileSentinel):
+        @format1.reader(None)
+        def reader(fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
             self.assertEqual('a\nb\nc\nd\ne\n', extra.read())
             self.assertEqual('!\n@\n#\n$\n%\nThe realest.\n', extra_2.read())
             yield TestClass([int(x) for x in fh.read().split('\n')])
 
-        gen = self.module.read(fh, format='format', extra=extra,
-                               extra_2=extra_2)
+        gen = self.registry.read(fh, format='format1', extra=extra,
+                                 extra_2=extra_2)
         self.assertEqual(TestClass([1, 2, 3, 4]), next(gen))
 
         fh.close()
 
     def test_file_sentinel_converted_to_none_generator(self):
+        format1 = self.registry.create_format('format1')
+
         fh = StringIO(u'1\n2\n3\n4')
 
-        @self.module.register_sniffer('format')
+        @format1.sniffer()
         def sniffer(fh):
             return '1' in fh.readline(), {}
 
-        @self.module.register_reader('format')
-        def reader(fh, extra=self.module.FileSentinel, other=2,
-                   extra_2=self.module.FileSentinel):
+        @format1.reader(None)
+        def reader(fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
             self.assertIsNone(extra)
             self.assertIsNone(extra_2)
             yield TestClass([int(x) for x in fh.read().split('\n')])
 
-        gen = self.module.read(fh, format='format')
+        gen = self.registry.read(fh, format='format1')
         self.assertEqual(TestClass([1, 2, 3, 4]), next(gen))
 
         fh.close()
 
     def test_file_sentinel_pass_none_generator(self):
+        format1 = self.registry.create_format('format1')
+
         fh = StringIO(u'1\n2\n3\n4')
 
-        @self.module.register_sniffer('format')
+        @format1.sniffer()
         def sniffer(fh):
             return '1' in fh.readline(), {}
 
-        @self.module.register_reader('format')
-        def reader(fh, extra=self.module.FileSentinel, other=2,
-                   extra_2=self.module.FileSentinel):
+        @format1.reader(None)
+        def reader(fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
             self.assertIsNone(extra)
             self.assertIsNone(extra_2)
             yield TestClass([int(x) for x in fh.read().split('\n')])
 
-        gen = self.module.read(fh, format='format', extra=None)
+        gen = self.registry.read(fh, format='format1', extra=None)
         self.assertEqual(TestClass([1, 2, 3, 4]), next(gen))
 
         fh.close()
 
+    def test_read_with_illegal_encoding(self):
+        binf = self.registry.create_format('binf', encoding='binary')
+        textf = self.registry.create_format('textf', encoding=None)
+
+        @binf.sniffer()
+        def binf_sniffer(fh):
+            return True, {}
+
+        @binf.reader(TestClass)
+        def binf_reader(fh):
+            return TestClass(['bin'])
+
+        @binf.reader(None)
+        def binf_reader_gen(fh):
+            yield TestClass(['bin'])
+
+        @textf.sniffer()
+        def textf_sniffer(fh):
+            return True, {}
+
+        @textf.reader(TestClass)
+        def textf_reader(fh):
+            return TestClass(['text'])
+
+        @textf.reader(None)
+        def textf_reader_gen(fh):
+            yield TestClass(['text'])
+
+        # Should skip binary sniffers
+        instance = self.registry.read(self.fp1, encoding=None, into=TestClass)
+        self.assertEqual(instance, TestClass(['text']))
+        gen = self.registry.read(self.fp1, encoding=None, format='textf')
+        self.assertEqual(next(gen), TestClass(['text']))
+        gen.close()
+        # Should skip text sniffers
+        instance = self.registry.read(self.fp1, encoding='binary',
+                                      into=TestClass)
+        self.assertEqual(instance, TestClass(['bin']))
+        gen = self.registry.read(self.fp1, encoding='binary', format='binf')
+        self.assertEqual(next(gen), TestClass(['bin']))
+        gen.close()
+
+        with self.assertRaises(ValueError):
+            self.registry.read([u'some content\n'], encoding='binary',
+                               into=TestClass)
+
+        with self.assertRaises(ValueError):
+            self.registry.read([u'some content\n'], format='textf',
+                               encoding='binary', into=TestClass)
+
+        with self.assertRaises(ValueError):
+            self.registry.read([u'some content\n'], format='textf',
+                               encoding='binary', verify=False, into=TestClass)
+
+        with self.assertRaises(ValueError):
+            self.registry.read([u'some content\n'], format='textf',
+                               encoding='binary')
+
+        with self.assertRaises(ValueError):
+            self.registry.read([u'some content\n'], format='textf',
+                               encoding='binary', verify=False)
+
+        with self.assertRaises(ValueError):
+            self.registry.read(self.fp1, format='binf',
+                               encoding=None, into=TestClass)
+
+        with self.assertRaises(ValueError):
+            self.registry.read(self.fp1, format='binf',
+                               encoding=None, verify=False, into=TestClass)
+
+        with self.assertRaises(ValueError):
+            self.registry.read(self.fp1, format='binf',
+                               encoding=None)
+
+        with self.assertRaises(ValueError):
+            self.registry.read(self.fp1, format='binf',
+                               encoding=None, verify=False)
+
+    def test_read_with_binary_encoding(self):
+        binf = self.registry.create_format('binf', encoding='binary')
+
+        @binf.reader(TestClass)
+        def reader1(fh):
+            self.assertIsInstance(fh, (io.BufferedReader, io.BufferedRandom))
+            return TestClass(['woo'])
+
+        @binf.reader(None)
+        def reader2(fh):
+            self.assertIsInstance(fh, (io.BufferedReader, io.BufferedRandom))
+            yield TestClass(['woo'])
+
+        instance = self.registry.read(self.fp1, format='binf', verify=False,
+                                      into=TestClass)
+        self.assertEqual(TestClass(['woo']), instance)
+
+        gen = self.registry.read(self.fp1, format='binf', verify=False,
+                                 into=None)
+        self.assertEqual(TestClass(['woo']), next(gen))
+        gen.close()
+
+    def test_io_kwargs_passed(self):
+        format1 = self.registry.create_format('format1')
+
+        @format1.sniffer()
+        def sniffer(fh):
+            return True, {}
+
+        @format1.reader(TestClass)
+        def reader1(fh):
+            self.assertEqual(fh.errors, 'replace')
+            return TestClass(['woo'])
+
+        @format1.reader(None)
+        def reader1_gen(fh):
+            self.assertEqual(fh.errors, 'replace')
+            yield TestClass(['woo'])
+
+        obj = self.registry.read(self.fp1, into=TestClass, errors='replace')
+        self.assertEqual(obj, TestClass(['woo']))
+        gen = self.registry.read(self.fp1, format='format1', errors='replace')
+        self.assertEqual(next(gen), TestClass(['woo']))
+        gen.close()
+
 
 class TestWrite(RegistryTest):
     def test_writer_does_not_exist(self):
         fh = StringIO()
         with self.assertRaises(UnrecognizedFormatError) as cm:
-            self.module.write({}, format='not_a_format', into=fh)
+            self.registry.write({}, format='not_a_format', into=fh)
 
         self.assertTrue('not_a_format' in str(cm.exception))
         self.assertTrue(str(fh) in str(cm.exception))
         fh.close()
 
     def test_writer_exists(self):
+        format1 = self.registry.create_format('format1')
+
         obj = TestClass(['1', '2', '3', '4'])
         fh = StringIO()
 
-        @self.module.register_writer('format', TestClass)
+        @format1.writer(TestClass)
         def writer(obj, fh):
+            self.assertIsInstance(fh, io.TextIOBase)
             fh.write(u'\n'.join(obj.list))
 
-        self.module.write(obj, format='format', into=fh)
+        self.registry.write(obj, format='format1', into=fh)
         fh.seek(0)
         self.assertEqual("1\n2\n3\n4", fh.read())
         fh.close()
 
     def test_writer_exists_real_file(self):
+        format1 = self.registry.create_format('format1')
+
         obj = TestClass(['1', '2', '3', '4'])
         fp = self.fp1
 
-        @self.module.register_writer('format', TestClass)
+        @format1.writer(TestClass)
         def writer(obj, fh):
-            fh.write('\n'.join(obj.list))
+            self.assertIsInstance(fh, io.TextIOBase)
+            fh.write(u'\n'.join(obj.list))
 
-        self.module.write(obj, format='format', into=fp)
+        self.registry.write(obj, format='format1', into=fp)
 
-        with open(fp, 'U') as fh:
-            self.assertEqual("1\n2\n3\n4", fh.read())
+        with io.open(fp) as fh:
+            self.assertEqual(u"1\n2\n3\n4", fh.read())
 
     def test_writer_passed_kwargs(self):
-        @self.module.register_reader('format')
+        format1 = self.registry.create_format('format1')
+
+        @format1.reader(None)
         def reader(fh):
             yield
 
-        @self.module.register_writer('format')
+        @format1.writer(None)
         def writer(obj, fh, **kwargs):
             self.assertEqual(kwargs['passed'], True)
 
-        generator = self.module.get_reader('format')(None)
-        self.module.write(generator, format='format',
-                          into=StringIO(), passed=True)
+        generator = self.registry.get_reader('format1', None)([])
+        self.registry.write(generator, format='format1',
+                            into=StringIO(), passed=True)
 
-    def test_that_mode_is_used(self):
-        obj = TestClass(['1', '2', '3', '4'])
+    def test_that_encoding_is_used(self):
+        format1 = self.registry.create_format('format1')
+
+        obj = TestClass([u'\u4f60\u597d\n'])  # Ni Hau
+        fp = self.fp1
+
+        @format1.writer(TestClass)
+        def writer(obj, fh):
+            fh.write(u''.join(obj.list))
+            self.assertEqual(self._expected_encoding, fh.encoding)
+
+        self._expected_encoding = 'big5'
+        self.registry.write(obj, format='format1', into=fp, encoding='big5')
+
+        with io.open(fp, mode='rb') as fh:
+            # This would have been b'\xe4\xbd\xa0\xe5\xa5\xbd\n' in utf8
+            self.assertEqual(b'\xa7A\xa6n\n', fh.read())
+
+    def test_non_default_encoding(self):
+        format1 = self.registry.create_format('format1', encoding='big5')
+
+        obj = TestClass([u'\u4f60\u597d\n'])  # Ni Hau
+        fp = self.fp1
+
+        @format1.writer(TestClass)
+        def writer(obj, fh):
+            fh.write(u''.join(obj.list))
+            self.assertEqual(self._expected_encoding, fh.encoding)
+
+        self._expected_encoding = 'big5'
+        self.registry.write(obj, format='format1', into=fp)
+
+        with io.open(fp, mode='rb') as fh:
+            self.assertEqual(b'\xa7A\xa6n\n', fh.read())
+
+        self._expected_encoding = 'utf8'
+        self.registry.write(obj, format='format1', into=fp, encoding='utf8')
+
+        with io.open(fp, mode='rb') as fh:
+            self.assertEqual(b'\xe4\xbd\xa0\xe5\xa5\xbd\n', fh.read())
+
+    def test_that_newline_is_used(self):
+        format1 = self.registry.create_format('format1')
+
+        obj = TestClass([u'a\n', u'b\n', u'c\n'])
+        fp = self.fp1
+
+        @format1.writer(TestClass)
+        def writer(obj, fh):
+            fh.write(u''.join(obj.list))
+
+        self.registry.write(obj, format='format1', into=fp, newline='\r')
+
+        with io.open(fp, mode='rb') as fh:
+            self.assertEqual(b'a\rb\rc\r', fh.read())
+
+    def test_non_default_newline(self):
+        format1 = self.registry.create_format('format1', newline='\r')
+
+        obj = TestClass([u'a\n', u'b\n', u'c\n'])
         fp = self.fp1
 
-        @self.module.register_writer('format', TestClass)
+        @format1.writer(TestClass)
         def writer(obj, fh):
-            fh.write('\n'.join(obj.list))
-            self.assertEqual(self.expected_mode, fh.mode)
+            fh.write(u''.join(obj.list))
 
-        self.expected_mode = 'w'
-        self.module.write(obj, format='format', into=fp)
+        self.registry.write(obj, format='format1', into=fp)
 
-        with open(fp, 'U') as fh:
-            self.assertEqual("1\n2\n3\n4", fh.read())
+        with io.open(fp, mode='rb') as fh:
+            self.assertEqual(b'a\rb\rc\r', fh.read())
 
-        fp = self.fp2
-        self.expected_mode = 'a'
-        self.module.write(obj, format='format', into=fp, mode='a')
+        self.registry.write(obj, format='format1', into=fp, newline='\n')
 
-        with open(fp, 'U') as fh:
-            self.assertEqual("1\n2\n3\n4", fh.read())
+        with io.open(fp, mode='rb') as fh:
+            self.assertEqual(b'a\nb\nc\n', fh.read())
 
     def test_file_sentinel_many(self):
+        format1 = self.registry.create_format('format1')
+
         fh = StringIO()
 
-        @self.module.register_writer('format', TestClass)
-        def writer(obj, fh, extra=self.module.FileSentinel, other=2,
-                   extra_2=self.module.FileSentinel):
-            extra.write('oh yeah...')
-            extra_2.write('oh no...')
+        @format1.writer(TestClass)
+        def writer(obj, fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
+            extra.write(u'oh yeah...')
+            extra_2.write(u'oh no...')
 
-        self.module.write(TestClass([]), format='format', into=fh,
-                          extra=self.fp1, extra_2=self.fp2)
+        self.registry.write(TestClass([]), format='format1', into=fh,
+                            extra=self.fp1, extra_2=self.fp2)
         with open(self.fp1) as f1:
             self.assertEqual('oh yeah...', f1.read())
 
@@ -1001,35 +1603,100 @@ class TestWrite(RegistryTest):
         fh.close()
 
     def test_file_sentinel_converted_to_none(self):
+        format1 = self.registry.create_format('format1')
+
         fh = StringIO()
 
-        @self.module.register_writer('format', TestClass)
-        def writer(obj, fh, extra=self.module.FileSentinel, other=2,
-                   extra_2=self.module.FileSentinel):
+        @format1.writer(TestClass)
+        def writer(obj, fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
             self.assertIsNone(extra)
             self.assertIsNone(extra_2)
 
-        self.module.write(TestClass([]), format='format', into=fh)
+        self.registry.write(TestClass([]), format='format1', into=fh)
 
         fh.close()
 
     def test_file_sentinel_pass_none(self):
+        format1 = self.registry.create_format('format1')
+
         fh = StringIO()
 
-        @self.module.register_writer('format', TestClass)
-        def writer(obj, fh, extra=self.module.FileSentinel, other=2,
-                   extra_2=self.module.FileSentinel):
+        @format1.writer(TestClass)
+        def writer(obj, fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
             self.assertIsNone(extra)
             self.assertIsNone(extra_2)
 
-        self.module.write(TestClass([]), format='format', into=fh, extra=None)
+        self.registry.write(TestClass([]), format='format1', into=fh,
+                            extra=None)
 
         fh.close()
 
+    def test_write_with_illegal_encoding(self):
+        binf = self.registry.create_format('binf', encoding='binary')
+        textf = self.registry.create_format('textf', encoding=None)
+
+        @binf.writer(TestClass)
+        def writer(obj, fh):
+            pass
+
+        @textf.writer(TestClass)
+        def writer2(obj, fh):
+            pass
+
+        with self.assertRaises(ValueError):
+            self.registry.write(TestClass([]), into=self.fp1, format='binf',
+                                encoding=None)
+
+        with self.assertRaises(ValueError):
+            self.registry.write(TestClass([]), into=self.fp1, format='textf',
+                                encoding='binary')
+
+    def test_write_binary_format(self):
+        format1 = self.registry.create_format('format1', encoding='binary')
+
+        obj = TestClass([b'a\n', b'b\n', b'c\n'])
+        fp = self.fp1
+
+        @format1.writer(TestClass)
+        def writer(obj, fh):
+            self.assertIsInstance(fh, (io.BufferedWriter, io.BufferedRandom))
+            fh.write(b''.join(obj.list))
+
+        self.registry.write(obj, format='format1', into=fp)
+
+        with io.open(fp, mode='rb') as fh:
+            self.assertEqual(b'a\nb\nc\n', fh.read())
+
+    def test_io_kwargs_passed(self):
+        format1 = self.registry.create_format('format1', encoding='ascii')
+
+        obj = TestClass([u'a\n', u'b\n', u'c\n'])
+        fp = self.fp1
+        f = io.BytesIO()
+
+        @format1.writer(TestClass)
+        def writer(obj, fh):
+            iterator = iter(obj.list)
+            fh.write(next(iterator))
+            fh.flush()  # Flush should be a noop for bz2
+            for l in iterator:
+                fh.write(l)
+
+        self.registry.write(obj, format='format1', into=fp, compression='bz2')
+        self.registry.write(obj, format='format1', into=f, compression='bz2')
+        expected = (
+            b'BZh91AY&SY\x03\x89\x0c\xa6\x00\x00\x01\xc1\x00\x00\x108\x00 \x00'
+            b'!\x9ah3M\x1c\xb7\x8b\xb9"\x9c(H\x01\xc4\x86S\x00')
 
-class TestInitializeOOPInterface(RegistryTest):
+        with io.open(fp, mode='rb') as fh:
+            self.assertEqual(expected, fh.read())
+
+        self.assertEqual(expected, f.getvalue())
+
+
+class TestMonkeyPatch(RegistryTest):
     def setUp(self):
-        super(TestInitializeOOPInterface, self).setUp()
+        super(TestMonkeyPatch, self).setUp()
 
         class UnassumingClass(object):
             pass
@@ -1037,30 +1704,37 @@ class TestInitializeOOPInterface(RegistryTest):
         class ClassWithDefault(object):
             default_write_format = 'favfmt'
 
+        class NoMonkeySee(object):
+            pass
+
         self.unassuming_class = UnassumingClass
         self.class_with_default = ClassWithDefault
+        self.no_monkey_see = NoMonkeySee
 
     def test_no_readers_writers(self):
-        self.module.initialize_oop_interface()
+        self.registry.monkey_patch()
         self.assertFalse(hasattr(self.unassuming_class, 'read'))
         self.assertFalse(hasattr(self.unassuming_class, 'write'))
         self.assertFalse(hasattr(self.class_with_default, 'read'))
         self.assertFalse(hasattr(self.class_with_default, 'write'))
 
     def test_readers_only(self):
-        @self.module.register_reader('favfmt', self.unassuming_class)
+        favfmt = self.registry.create_format('favfmt')
+        favfmt2 = self.registry.create_format('favfmt2')
+
+        @favfmt.reader(self.unassuming_class)
         def fvfmt_to_unasumming_class(fh):
             return
 
-        @self.module.register_reader('favfmt')
+        @favfmt.reader(None)
         def fvfmt_to_gen(fh):
             yield
 
-        @self.module.register_reader('favfmt2', self.unassuming_class)
+        @favfmt2.reader(self.unassuming_class)
         def fvfmt2_to_unasumming_class(fh):
             return
 
-        self.module.initialize_oop_interface()
+        self.registry.monkey_patch()
 
         self.assertTrue(hasattr(self.unassuming_class, 'read'))
         self.assertFalse(hasattr(self.unassuming_class, 'write'))
@@ -1071,19 +1745,22 @@ class TestInitializeOOPInterface(RegistryTest):
         self.assertIn('favfmt2', self.unassuming_class.read.__doc__)
 
     def test_writers_only(self):
-        @self.module.register_writer('favfmt', self.class_with_default)
-        def favfmt(fh):
+        favfmt = self.registry.create_format('favfmt')
+        favfmt2 = self.registry.create_format('favfmt2')
+
+        @favfmt.writer(self.class_with_default)
+        def favfmt_writer(fh):
             pass
 
-        @self.module.register_writer('favfmt')
+        @favfmt.writer(None)
         def gen_to_favfmt(fh):
             pass
 
-        @self.module.register_writer('favfmt2', self.class_with_default)
-        def favfmt2(fh):
+        @favfmt2.writer(self.class_with_default)
+        def favfmt2_writer(fh):
             pass
 
-        self.module.initialize_oop_interface()
+        self.registry.monkey_patch()
 
         self.assertFalse(hasattr(self.unassuming_class, 'read'))
         self.assertFalse(hasattr(self.unassuming_class, 'write'))
@@ -1094,56 +1771,70 @@ class TestInitializeOOPInterface(RegistryTest):
         self.assertIn('favfmt2', self.class_with_default.write.__doc__)
 
     def test_writers_no_default_format(self):
-        @self.module.register_writer('favfmt', self.unassuming_class)
-        def favfmt(fh):
+        favfmt = self.registry.create_format('favfmt')
+        favfmt2 = self.registry.create_format('favfmt2')
+
+        @favfmt.writer(self.unassuming_class)
+        def favfmt_writer(fh):
             pass
 
-        @self.module.register_writer('favfmt')
+        @favfmt.writer(None)
         def gen_to_favfmt(fh):
             pass
 
-        @self.module.register_writer('favfmt2', self.unassuming_class)
-        def favfmt2(fh):
+        @favfmt2.writer(self.unassuming_class)
+        def favfmt2_writer(fh):
             pass
         with self.assertRaises(NotImplementedError) as cm:
-            self.module.initialize_oop_interface()
+            self.registry.monkey_patch()
 
         self.assertIn('default_write_format', str(cm.exception))
 
     def test_readers_writers(self):
-        @self.module.register_reader('favfmt', self.unassuming_class)
+        favfmt = self.registry.create_format('favfmt')
+        favfmt2 = self.registry.create_format('favfmt2')
+
+        @favfmt.reader(self.unassuming_class)
         def fvfmt_to_unasumming_class(fh):
             return
 
-        @self.module.register_reader('favfmt', self.class_with_default)
+        @favfmt.reader(self.class_with_default)
         def fvfmt_to_class_w_default(fh):
             return
 
-        @self.module.register_reader('favfmt')
+        @favfmt.reader(None)
         def fvfmt_to_gen(fh):
             yield
 
-        @self.module.register_reader('favfmt2', self.unassuming_class)
+        @favfmt2.reader(self.unassuming_class)
         def fvfmt2_to_unasumming_class(fh):
             return
 
-        @self.module.register_reader('favfmt2', self.class_with_default)
+        @favfmt2.reader(self.class_with_default)
         def fvfmt2_to_class_w_default(fh):
             return
 
-        @self.module.register_writer('favfmt', self.class_with_default)
-        def favfmt(fh):
+        @favfmt.writer(self.class_with_default)
+        def favfmt_writer(fh):
             pass
 
-        @self.module.register_writer('favfmt')
+        @favfmt.writer(None)
         def gen_to_favfmt(fh):
             pass
 
-        @self.module.register_writer('favfmt2', self.class_with_default)
-        def favfmt2(fh):
+        @favfmt2.writer(self.class_with_default)
+        def favfmt2_writer(fh):
+            pass
+
+        @favfmt2.reader(self.no_monkey_see, monkey_patch=True)
+        def favfmt2_to_monkey(fh):
+            pass
+
+        @favfmt2.writer(self.no_monkey_see, monkey_patch=False)
+        def monkey_to_favfmt2(fh):
             pass
 
-        self.module.initialize_oop_interface()
+        self.registry.monkey_patch()
 
         self.assertTrue(hasattr(self.unassuming_class, 'read'))
         self.assertFalse(hasattr(self.unassuming_class, 'write'))
@@ -1151,6 +1842,9 @@ class TestInitializeOOPInterface(RegistryTest):
         self.assertTrue(hasattr(self.class_with_default, 'read'))
         self.assertTrue(hasattr(self.class_with_default, 'write'))
 
+        self.assertTrue(hasattr(self.no_monkey_see, 'read'))
+        self.assertFalse(hasattr(self.no_monkey_see, 'write'))
+
         self.assertIn('favfmt', self.unassuming_class.read.__doc__)
         self.assertIn('favfmt2', self.unassuming_class.read.__doc__)
 
@@ -1160,20 +1854,23 @@ class TestInitializeOOPInterface(RegistryTest):
         self.assertIn('favfmt', self.class_with_default.write.__doc__)
         self.assertIn('favfmt2', self.class_with_default.write.__doc__)
 
+        self.assertIn('favfmt2', self.no_monkey_see.read.__doc__)
+
     def test_read_kwargs_passed(self):
+        favfmt = self.registry.create_format('favfmt')
         self.was_called = False
 
-        @self.module.register_sniffer('favfmt')
+        @favfmt.sniffer()
         def fvfmt_sniffer(fh):
             return True, {}
 
-        @self.module.register_reader('favfmt', self.class_with_default)
+        @favfmt.reader(self.class_with_default)
         def fvfmt_to_class_w_default(fh, **kwargs):
             self.assertEqual('a', kwargs['a'])
             self.assertEqual(123, kwargs['b'])
             self.was_called = True
 
-        self.module.initialize_oop_interface()
+        self.registry.monkey_patch()
         fh = StringIO(u'notempty')
         self.class_with_default.read(fh, a='a', b=123)
 
@@ -1181,15 +1878,16 @@ class TestInitializeOOPInterface(RegistryTest):
         fh.close()
 
     def test_write_kwargs_passed(self):
+        favfmt = self.registry.create_format('favfmt')
         self.was_called = False
 
-        @self.module.register_writer('favfmt', self.class_with_default)
-        def favfmt(obj, fh, **kwargs):
+        @favfmt.writer(self.class_with_default)
+        def favfmt_writer(obj, fh, **kwargs):
             self.assertEqual('a', kwargs['a'])
             self.assertEqual(123, kwargs['b'])
             self.was_called = True
 
-        self.module.initialize_oop_interface()
+        self.registry.monkey_patch()
         fh = StringIO()
         self.class_with_default().write(fh, a='a', b=123)
 
@@ -1197,32 +1895,36 @@ class TestInitializeOOPInterface(RegistryTest):
         fh.close()
 
 
-class TestEmptyFileSniffer(unittest.TestCase):
-    def test_blank_file(self):
-        fh = StringIO()
-        self.assertTrue(empty_file_sniffer(fh)[0])
-        fh.close()
-
-    def test_whitespace_file(self):
-        fh = StringIO(u' ')
-        self.assertTrue(empty_file_sniffer(fh)[0])
-        fh.close()
-        fh = StringIO(u'\n')
-        self.assertTrue(empty_file_sniffer(fh)[0])
-        fh.close()
-        fh = StringIO(u'\t')
-        self.assertTrue(empty_file_sniffer(fh)[0])
-        fh.close()
-
-    def test_mixed_whitespace_file(self):
-        fh = StringIO(u'\n\n\t\n \t \t \n \n \n\n')
-        self.assertTrue(empty_file_sniffer(fh)[0])
-        fh.close()
-
-    def test_not_empty_file(self):
-        fh = StringIO(u'\n\n\t\n a\t \t \n \n \n\n')
-        self.assertFalse(empty_file_sniffer(fh)[0])
-        fh.close()
+class TestModuleFunctions(unittest.TestCase):
+
+    def test_sniff_matches(self):
+        exp = io_registry.sniff([u'(a, b);'])
+        result = sniff([u'(a, b);'])
+        self.assertEqual(exp, result)
+        self.assertEqual('newick', exp[0])
+        self.assertEqual({}, exp[1])
+
+    def test_read_matches(self):
+        input = [u'>\n', u'ACGT\n']
+        exp = io_registry.read(input, into=DNA)
+        result = read(input, into=DNA)
+        self.assertEqual(exp, result)
+        self.assertEqual(exp, DNA('ACGT', metadata={u'id': u'',
+                                                    u'description': u''}))
+
+    def test_write_matches(self):
+        input = DNA('ACGT')
+        exp = io_registry.write(input, format='fasta', into=[])
+        result = write(input, format='fasta', into=[])
+        self.assertEqual(exp, result)
+        self.assertEqual(exp, [u'>\n', u'ACGT\n'])
+
+    def test_create_format_matches(self):
+        with self.assertRaises(DuplicateRegistrationError):
+            io_registry.create_format('fasta')
+
+        with self.assertRaises(DuplicateRegistrationError):
+            create_format('fasta')
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/skbio/io/tests/test_util.py b/skbio/io/tests/test_util.py
index 797cb9d..7cdeef5 100644
--- a/skbio/io/tests/test_util.py
+++ b/skbio/io/tests/test_util.py
@@ -6,118 +6,557 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from six import StringIO, BytesIO
+from __future__ import absolute_import, division, print_function
 
 import unittest
 import tempfile
+import shutil
+import io
+import os.path
 
-from skbio.io.util import open_file, open_files, _is_string_or_bytes
+import httpretty
 
+import skbio.io
+from skbio.io.registry import open_file
+from skbio.util import get_data_path
 
-class TestFilePathOpening(unittest.TestCase):
-    def test_is_string_or_bytes(self):
-        self.assertTrue(_is_string_or_bytes('foo'))
-        self.assertTrue(_is_string_or_bytes(u'foo'))
-        self.assertTrue(_is_string_or_bytes(b'foo'))
-        self.assertFalse(_is_string_or_bytes(StringIO('bar')))
-        self.assertFalse(_is_string_or_bytes([1]))
 
-    def test_file_closed(self):
-        """File gets closed in decorator"""
-        f = tempfile.NamedTemporaryFile('r')
-        filepath = f.name
-        with open_file(filepath) as fh:
-            pass
-        self.assertTrue(fh.closed)
+class TestOpen(unittest.TestCase):
+    def test_open_invalid_mode(self):
+        with self.assertRaises(ValueError):
+            skbio.io.open([], mode='a')
+
+    def test_open_invalid_source(self):
+        with self.assertRaises(skbio.io.IOSourceError):
+            skbio.io.open(42)
+
+    def test_open_invalid_source_compression(self):
+        with self.assertRaises(ValueError):
+            skbio.io.open([u'foo'], compression='gzip')
+
+    def test_open_invalid_source_encoding(self):
+        with self.assertRaises(ValueError):
+            skbio.io.open([u'foo'], encoding='binary')
+
+        with self.assertRaises(ValueError):
+            skbio.io.open([u'foo'], encoding='binary', newline='\r')
 
-    def test_file_closed_harder(self):
-        """File gets closed in decorator, even if exceptions happen."""
-        f = tempfile.NamedTemporaryFile('r')
-        filepath = f.name
-        try:
-            with open_file(filepath) as fh:
-                raise TypeError
-        except TypeError:
-            self.assertTrue(fh.closed)
+    def test_open_invalid_compression(self):
+        with self.assertRaises(ValueError):
+            skbio.io.open(io.BytesIO(), compression='foo')
+
+
+class ReadableBinarySourceTests(object):
+    def check_closed(self, file, expected):
+        if hasattr(file, 'closed'):
+            self.assertEqual(file.closed, expected)
+
+    def check_open_state_contents(self, file, contents, is_binary, **kwargs):
+        result = skbio.io.open(file, **kwargs)
+        if is_binary:
+            self.assertIsInstance(result, (io.BufferedReader,
+                                           io.BufferedRandom))
         else:
-            # If we're here, no exceptions have been raised inside the
-            # try clause, so the context manager swallowed them. No
-            # good.
-            raise Exception("`open_file` didn't propagate exceptions")
-
-    def test_filehandle(self):
-        """Filehandles slip through untouched"""
-        with tempfile.TemporaryFile('r') as fh:
-            with open_file(fh) as ffh:
-                self.assertTrue(fh is ffh)
-            # And it doesn't close the file-handle
-            self.assertFalse(fh.closed)
-
-    def test_StringIO(self):
-        """StringIO (useful e.g. for testing) slips through."""
-        f = StringIO("File contents")
-        with open_file(f) as fh:
-            self.assertTrue(fh is f)
-
-    def test_BytesIO(self):
-        """BytesIO (useful e.g. for testing) slips through."""
-        f = BytesIO(b"File contents")
-        with open_file(f) as fh:
-            self.assertTrue(fh is f)
-
-
-class TestFilePathsOpening(unittest.TestCase):
-    def test_files_closed(self):
-        """File gets closed in decorator"""
-        f = tempfile.NamedTemporaryFile('r')
-        f2 = tempfile.NamedTemporaryFile('r')
-        filepath = f.name
-        filepath2 = f2.name
-        with open_files([filepath, filepath2]) as fhs:
-            pass
-        for fh in fhs:
-            self.assertTrue(fh.closed)
-
-    def test_files_closed_harder(self):
-        """File gets closed in decorator, even if exceptions happen."""
-        f = tempfile.NamedTemporaryFile('r')
-        f2 = tempfile.NamedTemporaryFile('r')
-        filepath = f.name
-        filepath2 = f2.name
-        try:
-            with open_files([filepath, filepath2]) as fhs:
-                raise TypeError
-        except TypeError:
-            for fh in fhs:
-                self.assertTrue(fh.closed)
+            self.assertIsInstance(result, io.TextIOBase)
+        self.assertTrue(result.readable())
+        self.assertEqual(result.read(), contents)
+        self.assertFalse(result.closed)
+
+        result.close()
+        self.assertTrue(result.closed)
+        self.check_closed(file, True)
+
+    def check_open_file_state_contents(self, file, contents, is_binary,
+                                       **kwargs):
+        with open_file(file, **kwargs) as f:
+            if is_binary:
+                self.assertIsInstance(f, (io.BufferedReader,
+                                          io.BufferedRandom))
+            else:
+                self.assertIsInstance(f, io.TextIOBase)
+            self.assertTrue(f.readable())
+            self.assertEqual(f.read(), contents)
+        self.assertEqual(f.closed, self.expected_close)
+
+        f.close()
+        self.assertTrue(f.closed)
+        self.check_closed(file, True)
+
+    def check_open_buffer_close_behaviour(self, file, **kwargs):
+        if hasattr(file, 'close'):
+            wrapped = skbio.io.open(file, **kwargs)
+            file.close()
+            self.assertTrue(wrapped.closed)
+
+    def check_open_file_buffer_close_behaviour(self, file, **kwargs):
+        if hasattr(file, 'close'):
+            with open_file(file, **kwargs) as wrapped:
+                file.close()
+                self.assertTrue(wrapped.closed)
+
+    def check_open_gc_behaviour(self, file, **kwargs):
+        def mangle(file):
+            result = skbio.io.open(file, **kwargs)
+            self.assertIsInstance(result, io.TextIOBase)
+
+        f = skbio.io.open(file, encoding='binary')
+        mangle(f)
+        self.assertFalse(f.closed)
+        f.close()
+
+    def check_open_file_gc_behaviour(self, file, **kwargs):
+        def mangle(file):
+            with open_file(file, **kwargs) as result:
+                self.assertIsInstance(result, io.TextIOBase)
+
+        with open_file(file, encoding='binary') as f:
+            mangle(f)
+            self.assertFalse(f.closed)
+
+    def test_open_gc_binary(self):
+        self.check_open_gc_behaviour(self.read_file)
+
+    def test_open_gc_encoding(self):
+        self.check_open_gc_behaviour(self.encoded_file)
+
+    def test_open_gc_compression(self):
+        self.check_open_gc_behaviour(self.gzip_file)
+        self.check_open_gc_behaviour(self.bz2_file)
+
+    def test_open_gc_compression_encoding(self):
+        self.check_open_gc_behaviour(self.gzip_encoded_file)
+        self.check_open_gc_behaviour(self.bz2_encoded_file)
+
+    def test_open_file_gc_binary(self):
+        self.check_open_file_gc_behaviour(self.read_file)
+
+    def test_open_file_gc_encoding(self):
+        self.check_open_file_gc_behaviour(self.encoded_file)
+
+    def test_open_file_gc_compression(self):
+        self.check_open_file_gc_behaviour(self.gzip_file)
+        self.check_open_file_gc_behaviour(self.bz2_file)
+
+    def test_open_file_gc_compression_encoding(self):
+        self.check_open_file_gc_behaviour(self.gzip_encoded_file)
+        self.check_open_file_gc_behaviour(self.bz2_encoded_file)
+
+    def test_open_underclose_binary(self):
+        self.check_open_buffer_close_behaviour(self.read_file)
+
+    def test_open_underclose_encoding(self):
+        self.check_open_buffer_close_behaviour(self.encoded_file)
+
+    def test_open_underclose_compression(self):
+        self.check_open_buffer_close_behaviour(self.gzip_file)
+        self.check_open_buffer_close_behaviour(self.bz2_file)
+
+    def test_open_underclose_compression_encoding(self):
+        self.check_open_buffer_close_behaviour(self.gzip_encoded_file)
+        self.check_open_buffer_close_behaviour(self.bz2_encoded_file)
+
+    def test_open_file_underclose_binary(self):
+        self.check_open_file_buffer_close_behaviour(self.read_file)
+
+    def test_open_file_underclose_encoding(self):
+        self.check_open_file_buffer_close_behaviour(self.encoded_file)
+
+    def test_open_file_underclose_compression(self):
+        self.check_open_file_buffer_close_behaviour(self.gzip_file)
+        self.check_open_file_buffer_close_behaviour(self.bz2_file)
+
+    def test_open_file_underclose_compression_encoding(self):
+        self.check_open_file_buffer_close_behaviour(self.gzip_encoded_file)
+        self.check_open_file_buffer_close_behaviour(self.bz2_encoded_file)
+
+    def test_open_binary(self):
+        self.check_open_state_contents(self.read_file, self.binary_contents,
+                                       True, mode='r', encoding='binary')
+
+    def test_open_binary_compression_none(self):
+        self.check_open_state_contents(self.read_file, self.binary_contents,
+                                       True, mode='r', encoding='binary',
+                                       compression=None)
+
+    def test_open_encoding(self):
+        self.check_open_state_contents(self.encoded_file,
+                                       self.decoded_contents, False,
+                                       mode='r', encoding=self.encoding)
+
+    def test_open_auto_compression_binary(self):
+        self.check_open_state_contents(self.gzip_file,
+                                       self.binary_contents, True,
+                                       mode='r', encoding='binary',
+                                       compression='auto')
+
+        self.check_open_state_contents(self.bz2_file,
+                                       self.binary_contents, True,
+                                       mode='r', encoding='binary',
+                                       compression='auto')
+
+    def test_open_gzip_compression_binary(self):
+        self.check_open_state_contents(self.gzip_file,
+                                       self.binary_contents, True,
+                                       mode='r', encoding='binary',
+                                       compression='gzip')
+
+    def test_open_bz2_compression_binary(self):
+        self.check_open_state_contents(self.bz2_file,
+                                       self.binary_contents, True,
+                                       mode='r', encoding='binary',
+                                       compression='bz2')
+
+    def test_open_default_compression_encoding(self):
+        self.check_open_state_contents(self.gzip_encoded_file,
+                                       self.decoded_contents, False,
+                                       mode='r', encoding=self.encoding)
+
+        self.check_open_state_contents(self.bz2_encoded_file,
+                                       self.decoded_contents, False,
+                                       mode='r', encoding=self.encoding)
+
+    def test_open_file_binary(self):
+        self.check_open_file_state_contents(self.read_file,
+                                            self.binary_contents,
+                                            True, mode='r', encoding='binary')
+
+    def test_open_file_binary_compression_none(self):
+        self.check_open_file_state_contents(self.read_file,
+                                            self.binary_contents,
+                                            True, mode='r', encoding='binary',
+                                            compression=None)
+
+    def test_open_file_encoding(self):
+        self.check_open_file_state_contents(self.encoded_file,
+                                            self.decoded_contents, False,
+                                            mode='r', encoding=self.encoding)
+
+    def test_open_file_auto_compression_binary(self):
+        self.check_open_file_state_contents(self.gzip_file,
+                                            self.binary_contents, True,
+                                            mode='r', encoding='binary',
+                                            compression='auto')
+
+        self.check_open_file_state_contents(self.bz2_file,
+                                            self.binary_contents, True,
+                                            mode='r', encoding='binary',
+                                            compression='auto')
+
+    def test_open_file_gzip_compression_binary(self):
+        self.check_open_file_state_contents(self.gzip_file,
+                                            self.binary_contents, True,
+                                            mode='r', encoding='binary',
+                                            compression='gzip')
+
+    def test_open_file_bz2_compression_binary(self):
+        self.check_open_file_state_contents(self.bz2_file,
+                                            self.binary_contents, True,
+                                            mode='r', encoding='binary',
+                                            compression='bz2')
+
+    def test_open_file_default_compression_encoding(self):
+        self.check_open_file_state_contents(self.gzip_encoded_file,
+                                            self.decoded_contents, False,
+                                            mode='r', encoding=self.encoding)
+
+        self.check_open_file_state_contents(self.bz2_encoded_file,
+                                            self.decoded_contents, False,
+                                            mode='r', encoding=self.encoding)
+
+
+class ReadableSourceTest(unittest.TestCase):
+    def setUp(self):
+        self.read_file = self.get_fileobj(get_data_path("example_file"))
+        self.gzip_file = \
+            self.get_fileobj(get_data_path("example_file.gz"))
+        self.bz2_file = \
+            self.get_fileobj(get_data_path("example_file.bz2"))
+        self.encoded_file = self.get_fileobj(get_data_path("big5_file"))
+        self.gzip_encoded_file = \
+            self.get_fileobj(get_data_path("big5_file.gz"))
+        self.bz2_encoded_file = \
+            self.get_fileobj(get_data_path("big5_file.bz2"))
+
+        self.binary_contents = (b"This is some content\n"
+                                b"It occurs on more than one line\n")
+        self.decoded_contents = u'\u4f60\u597d\n'  # Ni Hau
+        self.compression = 'gzip'
+        self.encoding = "big5"
+
+    def tearDown(self):
+        self.safe_close(self.read_file)
+        self.safe_close(self.gzip_file)
+        self.safe_close(self.bz2_file)
+        self.safe_close(self.encoded_file)
+        self.safe_close(self.gzip_encoded_file)
+        self.safe_close(self.bz2_encoded_file)
+
+    def safe_close(self, f):
+        if hasattr(f, 'close'):
+            f.close()
+
+
+class WritableBinarySourceTests(object):
+    def check_closed(self, file, expected):
+        if hasattr(file, 'closed'):
+            self.assertEqual(file.closed, expected)
+
+    def check_open_state_contents(self, file, contents, is_binary,
+                                  **kwargs):
+        result = skbio.io.open(file, mode='w', **kwargs)
+        if is_binary:
+            self.assertIsInstance(result, (io.BufferedWriter,
+                                           io.BufferedRandom))
         else:
-            # If we're here, no exceptions have been raised inside the
-            # try clause, so the context manager swallowed them. No
-            # good.
-            raise Exception("`open_file` didn't propagate exceptions")
-
-    def test_filehandle(self):
-        """Filehandles slip through untouched"""
-        with tempfile.TemporaryFile('r') as fh:
-            with tempfile.TemporaryFile('r') as fh2:
-                with open_file([fh, fh2]) as fhs:
-                    self.assertTrue(fh is fhs[0])
-                    self.assertTrue(fh2 is fhs[1])
-                # And it doesn't close the file-handle
-                for fh in fhs:
-                    self.assertFalse(fh.closed)
-
-    def test_StringIO(self):
-        """StringIO (useful e.g. for testing) slips through."""
-        f = StringIO("File contents")
-        with open_files([f]) as fhs:
-            self.assertTrue(fhs[0] is f)
-
-    def test_BytesIO(self):
-        """BytesIO (useful e.g. for testing) slips through."""
-        f = BytesIO(b"File contents")
-        with open_files([f]) as fhs:
-            self.assertTrue(fhs[0] is f)
+            self.assertIsInstance(result, io.TextIOBase)
+        self.assertTrue(result.writable())
+
+        result.write(contents)
+        self.assertFalse(result.closed)
+
+        if self.expected_close:
+            result.close()
+            self.assertTrue(result.closed)
+            self.check_closed(file, True)
+
+    def test_open_binary(self):
+        self.check_open_state_contents(self.binary_file, self.binary_contents,
+                                       True, encoding='binary',
+                                       compression=None)
+
+        self.assertEqual(self.get_contents(self.binary_file),
+                         self.binary_contents)
+
+    def test_open_gzip(self):
+        self.check_open_state_contents(self.gzip_file, self.text_contents,
+                                       False, compression='gzip')
+
+        # The first 10 bytes of a gzip header include a timestamp, so skip.
+        self.assertEqual(self.get_contents(self.gzip_file)[10:],
+                         self.gzip_contents[10:])
+
+    def test_open_bz2(self):
+        self.check_open_state_contents(self.bz2_file, self.text_contents,
+                                       False, compression='bz2')
+
+        self.assertEqual(self.get_contents(self.bz2_file),
+                         self.bz2_contents)
+
+    def test_open_encoding(self):
+        self.check_open_state_contents(self.big5_file, self.decoded_contents,
+                                       False, encoding='big5')
+
+        self.assertEqual(self.get_contents(self.big5_file),
+                         self.encoded_contents)
+
+    def test_open_gzip_encoding(self):
+        self.check_open_state_contents(self.gzip_encoded_file,
+                                       self.decoded_contents, False,
+                                       compression='gzip', encoding='big5')
+
+        # The first 10 bytes of a gzip header include a timestamp, so skip.
+        self.assertEqual(self.get_contents(self.gzip_encoded_file)[10:],
+                         self.gzip_encoded_contents[10:])
+
+    def test_open_bz2_encoding(self):
+        self.check_open_state_contents(self.bz2_encoded_file,
+                                       self.decoded_contents, False,
+                                       compression='bz2', encoding='big5')
+
+        self.assertEqual(self.get_contents(self.bz2_encoded_file),
+                         self.bz2_encoded_contents)
+
+
+class WritableSourceTest(unittest.TestCase):
+    def setUp(self):
+        self._dir = tempfile.mkdtemp()
+
+        with io.open(get_data_path('example_file'), mode='rb') as f:
+            self.binary_contents = f.read()
+        self.binary_file = self._make_file('example_file')
+
+        with io.open(get_data_path('big5_file'), mode='rb') as f:
+            self.encoded_contents = f.read()
+        self.big5_file = self._make_file('big5_file')
+
+        with io.open(get_data_path('example_file.gz'), mode='rb') as f:
+            self.gzip_contents = f.read()
+        self.gzip_file = self._make_file('example_file.gz')
+
+        with io.open(get_data_path('example_file.bz2'), mode='rb') as f:
+            self.bz2_contents = f.read()
+        self.bz2_file = self._make_file('example_file.bz2')
+
+        with io.open(get_data_path('big5_file.gz'), mode='rb') as f:
+            self.gzip_encoded_contents = f.read()
+        self.gzip_encoded_file = self._make_file('big5_file.gz')
+
+        with io.open(get_data_path('big5_file.bz2'), mode='rb') as f:
+            self.bz2_encoded_contents = f.read()
+        self.bz2_encoded_file = self._make_file('big5_file.bz2')
+
+        self.decoded_contents = self.encoded_contents.decode('big5')
+        self.text_contents = self.binary_contents.decode('utf8')
+
+    def tearDown(self):
+        shutil.rmtree(self._dir)
+        self.safe_close(self.binary_file)
+        self.safe_close(self.gzip_file)
+        self.safe_close(self.bz2_file)
+        self.safe_close(self.big5_file)
+        self.safe_close(self.gzip_encoded_file)
+        self.safe_close(self.bz2_encoded_file)
+
+    def safe_close(self, f):
+        if hasattr(f, 'close'):
+            f.close()
+
+    def _make_file(self, name):
+        return self.get_fileobj(os.path.join(self._dir, name))
+
+
+class TestReadFilepath(ReadableBinarySourceTests, ReadableSourceTest):
+    expected_close = True
+
+    def get_fileobj(self, path):
+        return path
+
+
+class TestWriteFilepath(WritableBinarySourceTests, WritableSourceTest):
+    expected_close = True
+
+    def get_fileobj(self, path):
+        return path
+
+    def get_contents(self, file):
+        with io.open(file, mode='rb') as f:
+            return f.read()
+
+
+class TestReadURL(ReadableBinarySourceTests, ReadableSourceTest):
+    expected_close = True
+
+    def setUp(self):
+        super(TestReadURL, self).setUp()
+        httpretty.enable()
+
+        for file in (get_data_path('example_file'),
+                     get_data_path('big5_file'),
+                     get_data_path('example_file.gz'),
+                     get_data_path('example_file.bz2'),
+                     get_data_path('big5_file.gz'),
+                     get_data_path('big5_file.bz2')):
+
+            with io.open(file, mode='rb') as f:
+                httpretty.register_uri(httpretty.GET, self.get_fileobj(file),
+                                       body=f.read(),
+                                       content_type="application/octet-stream")
+
+    def tearDown(self):
+        super(TestReadURL, self).tearDown()  # was setUp(): re-ran setup on teardown
+        httpretty.disable()
+
+    def get_fileobj(self, path):
+        return "http://example.com/" + os.path.split(path)[1]
+
+
+class TestReadBytesIO(ReadableBinarySourceTests, ReadableSourceTest):
+    expected_close = False
+
+    def get_fileobj(self, path):
+        with io.open(path, mode='rb') as f:
+            return io.BytesIO(f.read())
+
+
+class TestWriteBytesIO(WritableBinarySourceTests, WritableSourceTest):
+    expected_close = False
+
+    def get_fileobj(self, path):
+        return io.BytesIO()
+
+    def get_contents(self, file):
+        return file.getvalue()
+
+    def test_open_gzip(self):
+        self.check_open_state_contents(self.gzip_file, self.text_contents,
+                                       False, compression='gzip')
+
+        # Skip both gzip headers: timestamp and stored filename differ
+        self.assertEqual(self.get_contents(self.gzip_file)[10:],
+                         self.gzip_contents[23:])
+
+    def test_open_gzip_encoding(self):
+        self.check_open_state_contents(self.gzip_encoded_file,
+                                       self.decoded_contents, False,
+                                       compression='gzip', encoding='big5')
+
+        # Skip both gzip headers: timestamp and stored filename differ
+        self.assertEqual(self.get_contents(self.gzip_encoded_file)[10:],
+                         self.gzip_encoded_contents[20:])
+
+
+class TestReadBufferedReader(ReadableBinarySourceTests, ReadableSourceTest):
+    expected_close = False
+
+    def get_fileobj(self, path):
+        return io.open(path, mode='rb')
+
+
+class TestWriteBufferedReader(WritableBinarySourceTests, WritableSourceTest):
+    expected_close = False
+
+    def get_fileobj(self, path):
+        return io.open(path, mode='w+b')
+
+    def get_contents(self, file):
+        file.close()
+        with io.open(file.name, mode='rb') as f:
+            return f.read()
+
+
+class TestIterableReaderWriter(unittest.TestCase):
+    def test_open(self):
+        def gen():
+            yield u'a'
+            yield u'b'
+            yield u'c'
+        list_ = list(gen())
+
+        for input_ in gen(), list_:
+            with skbio.io.open(input_) as result:
+                self.assertIsInstance(result, io.TextIOBase)
+                self.assertEqual(result.read(), u'abc')
+
+    def test_open_with_newline(self):
+        l = [u'a\r', u'b\r', u'c\r']
+        with skbio.io.open(l, newline='\r') as result:
+            self.assertIsInstance(result, io.TextIOBase)
+            self.assertEqual(result.readlines(), l)
+
+    def test_open_invalid_iterable(self):
+        with self.assertRaises(skbio.io.IOSourceError):
+            skbio.io.open([b'abc'])
+
+    def test_open_empty_iterable(self):
+        with skbio.io.open([]) as result:
+            self.assertIsInstance(result, io.TextIOBase)
+            self.assertEqual(result.read(), u'')
+
+    def test_open_write_mode(self):
+        l = []
+        with skbio.io.open(l, mode='w') as fh:
+            fh.write(u'abc')
+        self.assertEqual(l, [u'abc'])
+
+        l = []
+        with skbio.io.open(l, mode='w', newline='\r') as fh:
+            fh.write(u'ab\nc\n')
+        self.assertEqual(l, [u'ab\r', u'c\r'])
+
+        self.assertTrue(fh.closed)
+        fh.close()
+        self.assertTrue(fh.closed)
+
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/skbio/io/util.py b/skbio/io/util.py
index 362d19d..161c599 100644
--- a/skbio/io/util.py
+++ b/skbio/io/util.py
@@ -13,6 +13,7 @@ Functions
 .. autosummary::
     :toctree: generated/
 
+    open
     open_file
     open_files
 
@@ -26,74 +27,251 @@ Functions
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from future.builtins import bytes, str
+from __future__ import absolute_import, division, print_function
 
-from contextlib import contextmanager
+import io
+from contextlib2 import contextmanager, ExitStack
 
+from skbio.io import IOSourceError
+from skbio.io._iosources import get_io_sources, get_compression_handler
+from skbio.io._fileobject import (
+    is_binary_file, SaneTextIOWrapper, CompressedBufferedReader,
+    CompressedBufferedWriter)
+from skbio.util._decorator import stable
 
-def _is_string_or_bytes(s):
-    """Returns True if input argument is string (unicode or not) or bytes.
-    """
-    return isinstance(s, str) or isinstance(s, bytes)
+_d = dict(mode='r', encoding=None, errors=None, newline=None,
+          compression='auto', compresslevel=9)
+
+
+def _resolve(file, mode=_d['mode'], encoding=_d['encoding'],
+             errors=_d['errors'], newline=_d['newline'],
+             compression=_d['compression'], compresslevel=_d['compresslevel']):
+    arguments = locals().copy()
+
+    if mode not in {'r', 'w'}:
+        raise ValueError("Unsupported mode: %r, use 'r' or 'w'" % mode)
+
+    newfile = None
+    source = None
+    for source_handler in get_io_sources():
+        source = source_handler(file, arguments)
+        if mode == 'r' and source.can_read():
+            newfile = source.get_reader()
+            break
+        elif mode == 'w' and source.can_write():
+            newfile = source.get_writer()
+            break
+
+    if newfile is None:
+        raise IOSourceError(
+            "Could not open source: %r (mode: %r)" % (file, mode))
+
+    return newfile, source, is_binary_file(newfile)
+
+
+ at stable(as_of="0.4.0")
+def open(file, mode=_d['mode'], encoding=_d['encoding'], errors=_d['errors'],
+         newline=_d['newline'], compression=_d['compression'],
+         compresslevel=_d['compresslevel']):
+    r"""Convert input into a filehandle.
+
+    Supported inputs:
+
+    +----------------------------+----------+-----------+-------------+
+    | type                       | can read | can write | source type |
+    +============================+==========+===========+=============+
+    | file path                  | True     | True      | Binary      |
+    +----------------------------+----------+-----------+-------------+
+    | URL                        | True     | False     | Binary      |
+    +----------------------------+----------+-----------+-------------+
+    | ``[u"lines list\n"]``      | True     | True      | Text        |
+    +----------------------------+----------+-----------+-------------+
+    | :class:`io.StringIO`       | True     | True      | Text        |
+    +----------------------------+----------+-----------+-------------+
+    | :class:`io.BytesIO`        | True     | True      | Binary      |
+    +----------------------------+----------+-----------+-------------+
+    | :class:`io.TextIOWrapper`  | True     | True      | Text        |
+    +----------------------------+----------+-----------+-------------+
+    | :class:`io.BufferedReader` | True     | False     | Binary      |
+    +----------------------------+----------+-----------+-------------+
+    | :class:`io.BufferedWriter` | False    | True      | Binary      |
+    +----------------------------+----------+-----------+-------------+
+    | :class:`io.BufferedRandom` | True     | True      | Binary      |
+    +----------------------------+----------+-----------+-------------+
 
+    .. note:: Filehandles opened with ``open`` in Python 2 are **not**
+       supported. Use ``io.open`` if you need to pass a filehandle.
+
+    .. note:: When reading a list of unicode (str) lines, the input for
+       `newline` is used to determine the number of lines in the resulting file
+       handle, not the number of elements in the list. This is to allow
+       composition with ``file.readlines()``.
+
+
+    Parameters
+    ----------
+    file : filepath, url, filehandle, list
+        The input to convert to a filehandle.
+    mode : {'r', 'w'}, optional
+        Whether to return a readable or writable file. Conversely, this does
+        not imply that the returned file will be unwritable or unreadable.
+        To get a binary filehandle set `encoding` to binary.
+    encoding : str, optional
+        The encoding scheme to use for the file. If set to 'binary', no bytes
+        will be translated. Otherwise this matches the behavior of
+        :func:`io.open`.
+    errors : str, optional
+        Specifies how encoding and decoding errors are to be handled. This has
+        no effect when `encoding` is binary (as there can be no errors).
+        Otherwise this matches the behavior of :func:`io.open`.
+    newline : {None, "", '\\n', '\\r\\n', '\\r'}, optional
+        Matches the behavior of :func:`io.open`.
+    compression : {'auto', 'gzip', 'bz2', None}, optional
+        Will compress or decompress `file` depending on `mode`. If 'auto' then
+        determining the compression of the file will be attempted and the
+        result will be transparently decompressed. 'auto' will do nothing
+        when writing. Other legal values will use their respective compression
+        schemes. `compression` cannot be used with a text source.
+    compresslevel : int (0-9 inclusive), optional
+        The level of compression to use, will be passed to the appropriate
+        compression handler. This is only used when writing.
+
+    Returns
+    -------
+    filehandle : io.TextIOBase or io.BufferedReader/Writer
+        When `encoding='binary'` an :class:`io.BufferedReader` or
+        :class:`io.BufferedWriter` will be returned depending on `mode`.
+        Otherwise an implementation of :class:`io.TextIOBase` will be returned.
+
+        .. note:: Any underlying resources needed to create `filehandle` are
+           managed transparently. If `file` was closeable, garbage collection
+           of `filehandle` will not close `file`. Calling `close` on
+           `filehandle` will close `file`. Conversely calling `close` on `file`
+           will cause `filehandle` to reflect a closed state. **This does not
+           mean that a `flush` has occurred for `filehandle`, there may still
+           have been data in its buffer! Additionally, resources may not have
+           been cleaned up properly, so ALWAYS call `close` on `filehandle` and
+           NOT on `file`.**
 
-def _get_filehandle(filepath_or, *args, **kwargs):
-    """Open file if `filepath_or` looks like a string/unicode/bytes, else
-    pass through.
     """
-    if _is_string_or_bytes(filepath_or):
-        fh, own_fh = open(filepath_or, *args, **kwargs), True
+    arguments = locals().copy()
+    del arguments['file']
+
+    file, _, is_binary_file = _resolve(file, **arguments)
+    return _munge_file(file, is_binary_file, arguments)
+
+
+def _munge_file(file, is_binary_file, arguments):
+    mode = arguments.get('mode', _d['mode'])
+    encoding = arguments.get('encoding', _d['encoding'])
+    errors = arguments.get('errors', _d['errors'])
+    newline = arguments.get('newline', _d['newline'])
+    compression = arguments.get('compression', _d['compression'])
+    is_output_binary = encoding == 'binary'
+    newfile = file
+
+    compression_handler = get_compression_handler(compression)
+
+    if is_output_binary and newline is not _d['newline']:
+        raise ValueError("Cannot use `newline` with binary encoding.")
+
+    if compression is not None and not compression_handler:
+        raise ValueError("Unsupported compression: %r" % compression)
+
+    if is_binary_file:
+        if compression:
+            c = compression_handler(newfile, arguments)
+            if mode == 'w':
+                newfile = CompressedBufferedWriter(file, c.get_writer(),
+                                                   streamable=c.streamable)
+            else:
+                newfile = CompressedBufferedReader(file, c.get_reader())
+
+        if not is_output_binary:
+            newfile = SaneTextIOWrapper(newfile, encoding=encoding,
+                                        errors=errors, newline=newline)
     else:
-        fh, own_fh = filepath_or, False
-    return fh, own_fh
+        if compression is not None and compression != 'auto':
+            raise ValueError("Cannot use compression with that source.")
+        if is_output_binary:
+            raise ValueError("Source is not a binary source")
+
+    return newfile
 
 
 @contextmanager
-def open_file(filepath_or, *args, **kwargs):
-    """Context manager, like ``open``, but lets file handles and file like
-    objects pass untouched.
+def _resolve_file(file, **kwargs):
+    file, source, is_binary_file = _resolve(file, **kwargs)
+    try:
+        yield file, source, is_binary_file
+    finally:
+        if source.closeable:
+            file.close()
 
-    It is useful when implementing a function that can accept both
-    strings and file-like objects (like numpy.loadtxt, etc).
 
-    Parameters
-    ----------
-    filepath_or : str/bytes/unicode string or file-like
-         If string, file to be opened using ``open``. Else, it is returned
-         untouched.
+ at contextmanager
+ at stable(as_of="0.4.0")
+def open_file(file, **kwargs):
+    r"""Context manager for :func:`skbio.io.util.open`.
 
-    Other parameters
-    ----------------
-    args, kwargs : tuple, dict
-        When `filepath_or` is a string, any extra arguments are passed
-        on to the ``open`` builtin.
+    The signature matches :func:`open`. This context manager will not close
+    filehandles that it did not create itself.
 
     Examples
     --------
-    >>> with open_file('filename') as f:  # doctest: +SKIP
-    ...     pass
-    >>> fh = open('filename')             # doctest: +SKIP
-    >>> with open_file(fh) as f:          # doctest: +SKIP
-    ...     pass
-    >>> fh.closed                         # doctest: +SKIP
+    Here our input isn't a filehandle and so `f` will get closed.
+
+    >>> with open_file([u'a\n']) as f:
+    ...     f.read()
+    ...
+    u'a\n'
+    >>> f.closed
+    True
+
+    Here we provide an open file and so `f` will not get closed and neither
+    will `file`.
+
+    >>> file = io.BytesIO(b'BZh91AY&SY\x03\x89\x0c\xa6\x00\x00\x01\xc1\x00\x00'
+    ...                   b'\x108\x00 \x00!\x9ah3M\x1c\xb7\x8b\xb9"\x9c(H\x01'
+    ...                   b'\xc4\x86S\x00')
+    >>> with open_file(file) as f:
+    ...     f.read()
+    ...
+    u'a\nb\nc\n'
+    >>> f.closed
+    False
+    >>> file.closed
     False
-    >>> fh.close()                        # doctest: +SKIP
 
     """
-    fh, own_fh = _get_filehandle(filepath_or, *args, **kwargs)
-    try:
-        yield fh
-    finally:
-        if own_fh:
-            fh.close()
+    with _resolve_file(file, **kwargs) as (file, source, is_binary_file):
+        newfile = _munge_file(file, is_binary_file, source.options)
+        try:
+            yield newfile
+        finally:
+            # As soon as we leave the above context manager file will be closed
+            # It is important to realize that because we are closing an inner
+            # buffer, the outer buffer will reflect that state, but it won't
+            # get flushed as the inner buffer is oblivious to the outer
+            # buffer's existence.
+            if not newfile.closed:
+                newfile.flush()
+                _flush_compressor(newfile)
+
+
+def _flush_compressor(file):
+    if isinstance(file, io.TextIOBase) and hasattr(file, 'buffer'):
+        file = file.buffer
+    if isinstance(file, CompressedBufferedWriter) and not file.streamable:
+        # Some formats like BZ2 compress the entire file, and so they will
+        # only flush once they have been closed. These kinds of files do not
+        # close their underlying buffer, but only testing can prove that...
+        file.raw.close()
 
 
 @contextmanager
-def open_files(fp_list, *args, **kwargs):
-    fhs, owns = zip(*[_get_filehandle(f, *args, **kwargs) for f in fp_list])
-    try:
-        yield fhs
-    finally:
-        for fh, is_own in zip(fhs, owns):
-            if is_own:
-                fh.close()
+ at stable(as_of="0.4.0")
+def open_files(files, **kwargs):
+    """A plural form of :func:`open_file`."""
+    with ExitStack() as stack:
+        yield [stack.enter_context(open_file(f, **kwargs)) for f in files]
diff --git a/skbio/parse/record.py b/skbio/parse/record.py
deleted file mode 100644
index 03195e7..0000000
--- a/skbio/parse/record.py
+++ /dev/null
@@ -1,491 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from future.utils import viewitems
-
-from numbers import Integral
-from copy import deepcopy
-
-from skbio.io import FieldError
-
-
-def string_and_strip(*items):
-    """Converts items to strings and strips them."""
-    return [str(i).strip() for i in items]
-
-
-def DelimitedSplitter(delimiter=None, max_splits=1):
-    """Returns function that returns stripped fields split by delimiter.
-
-    Unlike the default behavior of split, max_splits can be negative, in
-    which case it counts from the end instead of the start (i.e. splits
-    at the _last_ delimiter, last two delimiters, etc. for -1, -2, etc.)
-    However, if the delimiter is None (the default) and max_splits is
-    negative, will not preserve internal spaces.
-
-    Note: leaves empty fields in place.
-    """
-    is_int = isinstance(max_splits, Integral)
-    if is_int and (max_splits > 0):
-        def parser(line):
-            return [i.strip() for i in line.split(delimiter, max_splits)]
-    elif is_int and (max_splits < 0):
-        def parser(line):
-            to_insert = delimiter or ' '  # re-join fields w/ space if None
-            fields = line.split(delimiter)
-            if (fields == []) or (fields == ['']):
-                return []  # empty string or only delimiter: return nothing
-            # if not enough fields, count from the start, not the end
-            if len(fields) < max_splits:
-                first_fields = fields[0]
-                last_fields = fields[1:]
-            # otherwise, count off the last n fields and join the remainder
-            else:
-                first_fields = fields[:max_splits]
-                last_fields = fields[max_splits:]
-            pieces = []
-            # if first_fields is empty, don't make up an extra empty string
-            if first_fields:
-                pieces.append(to_insert.join(first_fields))
-            pieces.extend(last_fields)
-            return [i.strip() for i in pieces]
-
-    else:  # ignore max_splits if it was 0
-        def parser(line):
-            return [i.strip() for i in line.split(delimiter)]
-    return parser
-
-# The following provide examples of the kinds of functions DelimitedSplitter
-# returns.
-semi_splitter = DelimitedSplitter(';', None)
-space_pairs = DelimitedSplitter(None)
-equal_pairs = DelimitedSplitter('=')
-last_colon = DelimitedSplitter(':', -1)
-
-
-class GenericRecord(dict):
-
-    """Holds data for a generic field ->: value mapping.
-
-    Override Required with {name:prototype} mapping. Each required name
-    will get a deepcopy of its prototype. For example, use an empty list to
-    guarantee that each instance has its own list for a particular field to
-    which items can be appended.
-
-    Raises AttributeError on attempt to delete required item, but does not
-    raise an exception on attempt to delete absent item.
-
-    This class explicitly does _not_ override __getitem__ or __setitem__ for
-    performance reasons: if you need to transform keys on get/set or if you
-    need to access items as attributes and vice versa, use MappedRecord
-    instead.
-    """
-    Required = {}
-
-    def __init__(self, *args, **kwargs):
-        """Reads kwargs as properties of self."""
-        # perform init on temp dict to preserve interface: will then translate
-        # aliased keys when loading into self
-        temp = {}
-        dict.__init__(temp, *args, **kwargs)
-        self.update(temp)
-        for name, prototype in viewitems(self.Required):
-            if name not in self:
-                self[name] = deepcopy(prototype)
-
-    def __delitem__(self, item):
-        """Deletes item or raises exception if item required.
-
-        Note: Fails silently if item absent.
-        """
-        if item in self.Required:
-            raise AttributeError("%s is a required item" % (item,))
-        try:
-            super(GenericRecord, self).__delitem__(item)
-        except KeyError:
-            pass
-
-    def copy(self):
-        """Coerces copy to correct type"""
-        temp = self.__class__(super(GenericRecord, self).copy())
-        # don't forget to copy attributes!
-        for attr, val in viewitems(self.__dict__):
-            temp.__dict__[attr] = deepcopy(val)
-        return temp
-
-
-class MappedRecord(GenericRecord):
-
-    """GenericRecord that maps names of fields onto standardized names.
-
-    Override Aliases in subclass for new mapping of OldName->NewName. Each
-    OldName can have only one NewName, but it's OK if several OldNames map
-    to the same NewName.
-
-    Note: can access fields either as items or as attributes. In addition,
-    can access either using nonstandard names or using standard names.
-
-    Implementation note: currently, just a dict with appropriate get/set
-    overrides and ability to access items as attributes. Attribute access
-    is about 10x slower than in GenericRecord, so make sure you need the
-    additional capabilities if you use MappedRecord instead of GenericRecord.
-
-    WARNING: MappedRecord pretends to have every attribute, so will never raise
-    AttributeError when trying to find an unknown attribute. This feature can
-    cause surprising interactions when a Delegator is delegating its
-    attributes to a MappedRecord, since any attributes defined in __init__ will
-    be set in the MappedRecord and not in the object itself. The solution is
-    to use the self.__dict__['AttributeName'] = foo syntax to force the
-    attributes to be set in the object and not the MappedRecord to which it
-    forwards.
-    """
-    Aliases = {}
-
-    DefaultValue = None
-
-    def _copy(self, prototype):
-        """Returns a copy of item."""
-        if hasattr(prototype, 'copy'):
-            return prototype.copy()
-        elif isinstance(prototype, list):
-            return prototype[:]
-        elif (isinstance(prototype, str) or isinstance(prototype, int) or
-              isinstance(prototype, tuple) or isinstance(prototype, complex) or
-              prototype is None):
-            return prototype  # immutable type: use directly
-        else:
-            return deepcopy(prototype)
-
-    def __init__(self, *args, **kwargs):
-        """Reads kwargs as properties of self."""
-        # perform init on temp dict to preserve interface: will then translate
-        # aliased keys when loading into self
-        temp = {}
-        unalias = self.unalias
-        dict.__init__(temp, *args, **kwargs)
-        for key, val in viewitems(temp):
-            self[unalias(key)] = val
-        for name, prototype in viewitems(self.Required):
-            new_name = unalias(name)
-            if new_name not in self:
-                self[new_name] = self._copy(prototype)
-
-    def unalias(self, key):
-        """Returns dealiased name for key, or key if not in alias."""
-        try:
-            return self.Aliases.get(key, key)
-        except TypeError:
-            return key
-
-    def __getattr__(self, attr):
-        """Returns None if field is absent, rather than raising exception."""
-        if attr in self:
-            return self[attr]
-        elif attr in self.__dict__:
-            return self.__dict__[attr]
-        elif attr.startswith('__'):  # don't retrieve private class attrs
-            raise AttributeError
-        elif hasattr(self.__class__, attr):
-            return getattr(self.__class__, attr)
-        else:
-            return self._copy(self.DefaultValue)
-
-    def __setattr__(self, attr, value):
-        """Sets attribute in self if absent, converting name if necessary."""
-        normal_attr = self.unalias(attr)
-        # we overrode __getattr__, so have to simulate getattr(self, attr) by
-        # calling superclass method and checking for AttributeError.
-        # BEWARE: dict defines __getattribute__, not __getattr__!
-        try:
-            super(MappedRecord, self).__getattribute__(normal_attr)
-            super(MappedRecord, self).__setattr__(normal_attr, value)
-        except AttributeError:
-            self[normal_attr] = value
-
-    def __delattr__(self, attr):
-        """Deletes attribute, converting name if necessary. Fails silently."""
-        normal_attr = self.unalias(attr)
-        if normal_attr in self.Required:
-            raise AttributeError("%s is a required attribute" % (attr,))
-        else:
-            try:
-                super(MappedRecord, self).__delattr__(normal_attr)
-            except AttributeError:
-                del self[normal_attr]
-
-    def __getitem__(self, item):
-        """Returns default if item is absent, rather than raising exception."""
-        normal_item = self.unalias(item)
-        return self.get(normal_item, self._copy(self.DefaultValue))
-
-    def __setitem__(self, item, val):
-        """Sets item, converting name if necessary."""
-        super(MappedRecord, self).__setitem__(self.unalias(item), val)
-
-    def __delitem__(self, item):
-        """Deletes item, converting name if necessary. Fails silently."""
-        normal_item = self.unalias(item)
-        super(MappedRecord, self).__delitem__(normal_item)
-
-    def __contains__(self, item):
-        """Tests membership, converting name if necessary."""
-        return super(MappedRecord, self).__contains__(self.unalias(item))
-
-    def get(self, item, default):
-        """Returns self[item] or default if not present. Silent on unhashable.
-        """
-        try:
-            return super(MappedRecord, self).get(self.unalias(item), default)
-        except TypeError:
-            return default
-
-    def setdefault(self, key, default=None):
-        """Returns self[key] or default (and sets self[key]=default)"""
-        return super(MappedRecord, self).setdefault(self.unalias(key), default)
-
-    def update(self, *args, **kwargs):
-        """Updates self with items in other"""
-        temp = {}
-        unalias = self.unalias
-        temp.update(*args, **kwargs)
-        for key, val in viewitems(temp):
-            self[unalias(key)] = val
-
-# The following methods are useful for handling particular types of fields in
-# line-oriented parsers
-
-
-def TypeSetter(constructor=None):
-    """Returns function that takes obj, field, val and sets obj.field = val.
-
-    constructor can be any callable that returns an object.
-    """
-    if constructor:
-        def setter(obj, field, val):
-            setattr(obj, field, constructor(val))
-    else:
-        def setter(obj, field, val):
-            setattr(obj, field, val)
-    return setter
-
-int_setter = TypeSetter(int)
-str_setter = TypeSetter(str)
-list_setter = TypeSetter(list)
-tuple_setter = TypeSetter(tuple)
-dict_setter = TypeSetter(dict)
-float_setter = TypeSetter(float)
-complex_setter = TypeSetter(complex)
-bool_setter = TypeSetter(bool)
-identity_setter = TypeSetter()
-
-
-def list_adder(obj, field, val):
-    """Adds val to list in obj.field, creating list if necessary."""
-    try:
-        getattr(obj, field).append(val)
-    except AttributeError:
-        setattr(obj, field, [val])
-
-
-def dict_adder(obj, field, val):
-    """If val is a sequence, adds key/value pair in obj.field: else adds key.
-    """
-    try:
-        key, value = val
-    except (ValueError, TypeError):
-        key, value = val, None
-    try:
-        getattr(obj, field)[key] = value
-    except AttributeError:
-        setattr(obj, field, {key: value})
-
-
-class LineOrientedConstructor(object):
-
-    """Constructs a MappedRecord from a sequence of lines."""
-
-    def __init__(self, Lines=None, LabelSplitter=space_pairs, FieldMap=None,
-                 Constructor=MappedRecord, Strict=False):
-        """Returns new LineOrientedConstructor.
-
-        Fields:
-            Lines: set of lines to construct record from (for convenience).
-            Default is None.
-
-            LabelSplitter: function that returns (label, data) tuple.
-            Default is to split on first space and strip components.
-
-            FieldMap: dict of {fieldname:handler} functions. Each function
-            has the signature (obj, field, val) and performs an inplace
-            action like setting field to val or appending val to field.
-            Default is empty dict.
-
-            Constructor: constructor for the resulting object.
-            Default is MappedRecord: beware of using constructors that don't
-            subclass MappedRecord.
-
-            Strict: boolean controlling whether to raise error on unrecognized
-            field. Default is False.
-        """
-        self.Lines = Lines or []
-        self.LabelSplitter = LabelSplitter
-        self.FieldMap = FieldMap or {}
-        self.Constructor = Constructor
-        self.Strict = Strict
-
-    def __call__(self, Lines=None):
-        """Returns the record constructed from Lines, or self.Lines"""
-        if Lines is None:
-            Lines = self.Lines
-        result = self.Constructor()
-        fieldmap = self.FieldMap
-        aka = result.unalias
-
-        splitter = self.LabelSplitter
-        for line in Lines:
-            # find out how many items we got, setting key and val appropriately
-            items = list(splitter(line))
-            num_items = len(items)
-            if num_items == 2:  # typical case: key-value pair
-                raw_field, val = items
-            elif num_items > 2:
-                raw_field = items[0]
-                val = items[1:]
-            elif len(items) == 1:
-                raw_field, val = items[0], None
-            elif not items:  # presumably had line with just a delimiter?
-                continue
-            # figure out if we know the field under its original name or as
-            # an alias
-            if raw_field in fieldmap:
-                field, mapper = raw_field, fieldmap[raw_field]
-            else:
-                new_field = aka(raw_field)
-                if new_field in fieldmap:
-                    field, mapper = new_field, fieldmap[new_field]
-                else:
-                    if self.Strict:
-                        raise FieldError(
-                            "Got unrecognized field %s" %
-                            (raw_field,))
-                    else:
-                        identity_setter(result, raw_field, val)
-                    continue
-            # if we found the field in the fieldmap, apply the correct function
-            try:
-                mapper(result, field, val)
-            except:  # Warning: this is a catchall for _any_ exception,
-                        # and may mask what's actually going wrong.
-                if self.Strict:
-                    raise FieldError("Could not handle line %s" % (line,))
-        return result
-
-
-def FieldWrapper(fields, splitter=None, constructor=None):
-    """Returns dict containing field->val mapping, one level.
-
-    fields should be list of fields, in order.
-    splitter should be something like a DelimitedSplitter that converts the
-        line into a sequence of fields.
-    constructor is a callable applied to the dict after construction.
-
-    Call result on a _single_ line, not a list of lines.
-
-    Note that the constructor should take a dict and return an object of some
-    useful type. Additionally, it is the _constructor's_ responsibility to
-    complain if there are not enough fields, since zip will silently truncate
-    at the shorter sequence. This is actually useful in the case where many of
-    the later fields are optional.
-    """
-    if splitter is None:
-        splitter = DelimitedSplitter(None, None)
-    if constructor:
-        def parser(line):
-            return constructor(dict(zip(fields, splitter(line))))
-    else:
-        def parser(line):
-            return dict(zip(fields, splitter(line)))
-    return parser
-
-
-def StrictFieldWrapper(fields, splitter=None, constructor=None):
-    """Returns dict containing field->val mapping, one level.
-
-    fields should be list of fields, in order.
-    splitter should be something like a DelimitedSplitter that converts the
-        line into a sequence of fields.
-    constructor is a callable applied to the dict after construction.
-
-    Call result on a _single_ line, not a list of lines.
-
-    Note that the constructor should take a dict and return an object of some
-    useful type. Raises RecordError if the wrong number of fields are returned
-    from the split.
-    """
-    if splitter is None:
-        splitter = DelimitedSplitter(None, None)
-    if constructor:
-        def parser(line):
-            items = splitter(line)
-            if len(items) != len(fields):
-                raise FieldError("Expected %s items but got %s: %s" %
-                                 (len(fields), len(items), items))
-            return constructor(dict(zip(fields, items)))
-    else:
-        def parser(line):
-            items = splitter(line)
-            if len(items) != len(fields):
-                raise FieldError("Expected %s items but got %s: %s" %
-                                 (len(fields), len(items), items))
-            return dict(zip(fields, items))
-    return parser
-
-
-def raise_unknown_field(field, data):
-    """Raises a FieldError, displaying the offending field and data."""
-    raise FieldError("Got unknown field %s with data %s" % (field, data))
-
-
-class FieldMorpher(object):
-
-    """When called, applies appropriate constructors to each value of dict.
-
-    Initialize using a dict of fieldname:constructor pairs.
-    """
-
-    def __init__(self, Constructors, Default=raise_unknown_field):
-        """Returns a new FieldMorpher, using appropriate constructors.
-
-        If a field is unknown, will try to set key and value to the results
-        of Default(key, value): in other words, the signature of Default should
-        take a key and a value and should return a key and a value. The
-        built-in value of Default raises a FieldError instead, but it will
-        often be useful to do things like return the key/value pair unchanged,
-        or to strip the key and the value and then add them.
-        """
-        self.Constructors = Constructors
-        self.Default = Default
-
-    def __call__(self, data):
-        """Returns a new dict containing information converted from data."""
-        result = {}
-        default = self.Default
-        cons = self.Constructors
-        for key, val in viewitems(data):
-            if key in cons:
-                result[key] = cons[key](val)
-            else:
-                new_key, new_val = default(key, val)
-                # if we now recognize the key, use its constructor on the old
-                # val
-                if new_key in cons:
-                    result[new_key] = cons[new_key](val)
-                # otherwise, enter the new key and the new val
-                else:
-                    result[new_key] = new_val
-        return result
diff --git a/skbio/parse/record_finder.py b/skbio/parse/record_finder.py
deleted file mode 100644
index 15b4951..0000000
--- a/skbio/parse/record_finder.py
+++ /dev/null
@@ -1,193 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-"""Provides some classes for treating files as sequences of records.
-
-Typically more useful as subclasses. Covers the three main types of records:
-
-    DelimitedRecordFinder:  Records demarcated by an end line, e.g. '\\'
-    LabeledRecordFinder:    Records demarcated by a start line, e.g. '>label'
-    LineGrouper:            Records consisting of a certain number of lines.
-    TailedRecordFinder:     Records demarcated by an end mark, e.g. 'blah.'
-
-All the first classes ignore/delete blank lines and strip leading and trailing
-whitespace.  The TailedRecordFinder is functionally similar to
-DelimitedRecordFinder except that it accepts an is_tail function instead of a
-str.  Note that its default constructor is rstrip instead of strip.
-"""
-
-from skbio.io import RecordError
-from skbio.io.util import open_file
-
-
-def is_empty(line):
-    """Returns True empty lines and lines consisting only of whitespace."""
-    return (not line) or line.isspace()
-
-
-def DelimitedRecordFinder(delimiter, constructor=str.strip, ignore=is_empty,
-                          keep_delimiter=True, strict=True):
-    """Returns function that returns successive delimited records from file.
-
-    Includes delimiter in return value. Returns list of relevant lines.
-
-    Default constructor is str.strip, but can supply another constructor
-    to transform lines and/or coerce into correct type. If constructor is None,
-    passes along the lines without alteration.
-
-    Skips any lines for which ignore(line) evaluates True (default is to skip
-    whitespace).
-
-    keep_delimiter: keep delimiter line at the end of last block if True
-    (default), otherwise discard delimiter line.
-
-    strict: when lines found after the last delimiter -- raise error if True
-    (default), otherwise yield the lines silently
-    """
-    def parser(lines):
-        curr = []
-        for line in lines:
-            if constructor is not None:
-                line = constructor(line)
-            # else:
-            #    line = l
-            # ignore blank lines
-            if ignore(line):
-                continue
-            # if we find the delimiter, return the line; otherwise, keep it
-            if line == delimiter:
-                if keep_delimiter:
-                    curr.append(line)
-                yield curr
-                curr = []
-            else:
-                curr.append(line)
-        if curr:
-            if strict:
-                raise RecordError("Found additional data after records: %s" %
-                                  (curr))
-            else:
-                yield curr
-    return parser
-
-# The following is an example of the sorts of iterators RecordFinder returns.
-GbFinder = DelimitedRecordFinder('//')
-
-
-def TailedRecordFinder(is_tail_line, constructor=str.rstrip, ignore=is_empty,
-                       strict=True):
-    """Returns function that returns successive tailed records from lines.
-
-    Includes tail line in return value. Returns list of relevant lines.
-
-    constructor: a modifier for each line, default is str.rstrip: to remove
-    \n and trailing spaces. If constructor is None, passes along the lines
-    without alteration.
-
-    Skips over any lines for which ignore(line) evaluates True (default is
-    to skip empty lines).  Note that the line may be modified by constructor.
-
-    strict: if True (default), raise an error if the last line is not a tail.
-    otherwise, yield the last lines.
-    """
-    def parser(lines):
-        curr = []
-        for line in lines:
-            if constructor is not None:
-                line = constructor(line)
-            if ignore(line):
-                continue
-
-            curr.append(line)
-            # if we find the label, return the previous record
-            if is_tail_line(line):
-                yield curr
-                curr = []
-
-        # don't forget to return the last record in the file
-        if curr:
-            if strict:
-                raise RecordError('lines exist after the last tail_line '
-                                  'or no tail_line at all')
-            else:
-                yield curr
-
-    return parser
-
-
-def LabeledRecordFinder(is_label_line, constructor=str.strip, ignore=is_empty):
-    """Returns function that returns successive labeled records from file.
-
-    Includes label line in return value. Returns list of relevant lines.
-
-    Default constructor is string.strip, but can supply another constructor
-    to transform lines and/or coerce into correct type. If constructor is None,
-    passes along the lines without alteration.
-
-    Skips over any lines for which ignore(line) evaluates True (default is
-    to skip empty lines).
-
-    NOTE: Does _not_ raise an exception if the last line is a label line: for
-    some formats, this is acceptable. It is the responsibility of whatever is
-    parsing the sets of lines returned into records to complain if a record
-    is incomplete.
-    """
-    def parser(lines):
-        with open_file(lines) as lines:
-            curr = []
-            for l in lines:
-                try:
-                    l = str(l.decode('utf-8'))
-                except AttributeError:
-                    pass
-
-                if constructor is not None:
-                    line = constructor(l)
-                else:
-                    line = l
-                if ignore(line):
-                    continue
-                # if we find the label, return the previous record
-                if is_label_line(line):
-                    if curr:
-                        yield curr
-                        curr = []
-                curr.append(line)
-            # don't forget to return the last record in the file
-            if curr:
-                yield curr
-    return parser
-
-
-def LineGrouper(num, constructor=str.strip, ignore=is_empty):
-    """Returns num lines at a time, stripping and ignoring blanks.
-
-    Default constructor is str.strip, but can supply another constructor
-    to transform lines and/or coerce into correct type. If constructor is None,
-    passes along the lines without alteration.
-
-    Skips over any lines for which ignore(line) evaluates True: default is to
-    skip whitespace lines.
-
-    """
-    def parser(lines):
-        curr = []
-        for l in lines:
-            if constructor is not None:
-                line = constructor(l)
-            else:
-                line = l
-            if ignore(line):
-                continue
-            curr.append(line)
-            if len(curr) == num:
-                yield curr
-                curr = []
-        if curr:
-            raise RecordError("Non-blank lines not even multiple of %s" % num)
-    return parser
diff --git a/skbio/parse/sequences/__init__.py b/skbio/parse/sequences/__init__.py
deleted file mode 100644
index 2afdfd3..0000000
--- a/skbio/parse/sequences/__init__.py
+++ /dev/null
@@ -1,201 +0,0 @@
-r"""
-Parse biological sequences (:mod:`skbio.parse.sequences`)
-=========================================================
-
-.. currentmodule:: skbio.parse.sequences
-
-This module provides functions for parsing sequence files in a variety of
-different formats. Two interfaces are provided for parsing sequence files:
-sequence iterators (high-level, recommended interface) and parsing functions
-(lower-level interface).
-
-Sequence iterator interface
----------------------------
-The sequence iterator interface is the recommended way to parse sequence files.
-The ``load`` function provides a standard, high-level interface to iterate over
-sequence files regardless of file type or whether they are compressed. The
-method accepts single or multiple file paths and employs the correct file
-handlers, iterator objects, and parsers for the user.
-
-The benefit of the sequence iterator interface is that the type of the file and
-any file format details are abstracted away from the user. In this manner, the
-user does not need to worry about whether they're operating on FASTA or FASTQ
-files or any differences in the returns from their respective parsers.
-
-Classes
-^^^^^^^
-
-.. autosummary::
-   :toctree: generated/
-
-   SequenceIterator
-   FastaIterator
-   FastqIterator
-
-Functions
-^^^^^^^^^
-
-.. autosummary::
-   :toctree: generated/
-
-    load
-
-Examples
-^^^^^^^^
-For the first set of sequence iterator examples, we're going to use the
-``load`` function. The ``load`` function is intended to operate on file paths,
-so let's create two files for it to use. The first one will be a regular FASTA
-file, and the second will be a gzip'd FASTQ file:
-
->>> import os
->>> import gzip
->>> out = open('test_seqs.fna', 'w')
->>> out.write(">s1\nATGC\n>s2\nATGGC\n")
->>> out.close()
->>> outgz = gzip.open('test_seqs.fq.gz', 'w')
->>> _ = outgz.write("@s3\nAATTGG\n+\nghghgh\n at s4\nAAA\n+\nfgh\n")
->>> outgz.close()
-
-Now let's see what ``load`` can do:
-
->>> it = load(['test_seqs.fna', 'test_seqs.fq.gz'], phred_offset=64)
->>> for rec in it:
-...     print rec['SequenceID']
-...     print rec['Sequence']
-...     print rec['Qual']
-s1
-ATGC
-None
-s2
-ATGGC
-None
-s3
-AATTGG
-[39 40 39 40 39 40]
-s4
-AAA
-[38 39 40]
-
-To be polite, let's remove the files we just created:
-
->>> os.remove('test_seqs.fna')
->>> os.remove('test_seqs.fq.gz')
-
-In the following examples, we'll see how to use the sequence iterators directly
-instead of using ``load``.
-
->>> from StringIO import StringIO
->>> from skbio.parse.sequences import FastaIterator, FastqIterator
-
-In this first example, we're going to construct a FASTA iterator that is also
-paired with quality scores (e.g., as in 454 fasta/qual files).
-
->>> seqs = StringIO(">seq1\n"
-...                 "ATGC\n"
-...                 ">seq2\n"
-...                 "TTGGCC\n")
->>> qual = StringIO(">seq1\n"
-...                 "10 20 30 40\n"
-...                 ">seq2\n"
-...                 "1 2 3 4 5 6\n")
->>> it = FastaIterator(seq=[seqs], qual=[qual])
->>> for record in it:
-...     print record['Sequence']
-...     print record['Qual']
-ATGC
-[10 20 30 40]
-TTGGCC
-[1 2 3 4 5 6]
-
-In the next example, we're going to iterate over multiple FASTQ files at once.
-
->>> seqs1 = StringIO("@seq1\n"
-...                  "ATGC\n"
-...                  "+\n"
-...                  "hhhh\n")
->>> seqs2 = StringIO("@seq2\n"
-...                 "AATTGGCC\n"
-...                 ">seq2\n"
-...                 "abcdefgh\n")
->>> it = FastqIterator(seq=[seqs1, seqs2], phred_offset=64)
->>> for record in it:
-...     print record['Sequence']
-...     print record['Qual']
-ATGC
-[40 40 40 40]
-AATTGGCC
-[33 34 35 36 37 38 39 40]
-
-Finally, we can apply arbitrary transforms to the sequences during iteration.
-
->>> seqs1 = StringIO("@seq1\n"
-...                  "ATGC\n"
-...                  "+\n"
-...                  "hhhh\n")
->>> seqs2 = StringIO("@seq2\n"
-...                 "AATTGGCC\n"
-...                 ">seq2\n"
-...                 "abcdefgh\n")
->>> def rev_f(st):
-...     st['Sequence'] = st['Sequence'][::-1]
-...     st['Qual'] = st['Qual'][::-1] if st['Qual'] is not None else None
->>> it = FastqIterator(seq=[seqs1, seqs2], transform=rev_f, phred_offset=64)
->>> for record in it:
-...     print record['Sequence']
-...     print record['Qual']
-CGTA
-[40 40 40 40]
-CCGGTTAA
-[40 39 38 37 36 35 34 33]
-
-Low-level parsing functions
----------------------------
-Lower-level parsing functions are also made available in addition to the
-sequence iterator interface. These functions can be used to directly parse a
-single sequence file. They accept file paths, file handles, or file-like
-objects.
-
-Functions
-^^^^^^^^^
-
-.. autosummary::
-   :toctree: generated/
-
-   parse_fasta
-   parse_fastq
-   parse_qual
-   write_clustal
-   parse_clustal
-
-Exceptions
-----------
-
-.. autosummary::
-   :toctree: generated/
-
-   FastqParseError
-
-"""
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from numpy.testing import Tester
-
-from .fasta import parse_fasta, parse_qual
-from .fastq import parse_fastq
-from .clustal import parse_clustal, write_clustal
-from .iterator import FastaIterator, FastqIterator, SequenceIterator
-from .factory import load
-from ._exception import FastqParseError
-
-__all__ = ['write_clustal', 'parse_clustal',
-           'parse_fasta', 'parse_fastq', 'parse_qual', 'FastqIterator',
-           'FastaIterator', 'SequenceIterator', 'load', 'FastqParseError']
-
-test = Tester().test
diff --git a/skbio/parse/sequences/clustal.py b/skbio/parse/sequences/clustal.py
deleted file mode 100644
index 3160c8f..0000000
--- a/skbio/parse/sequences/clustal.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-from __future__ import absolute_import, division, print_function
-
-from skbio.io import RecordError
-from skbio.parse.record import DelimitedSplitter
-import warnings
-
-
-def _label_line_parser(record, splitter, strict=True):
-    """Returns dict mapping list of data to labels, plus list with field order.
-
-    Field order contains labels in order encountered in file.
-
-    NOTE: doesn't care if lines are out of order in different blocks. This
-    should never happen anyway, but it's possible that this behavior should
-    be changed to tighten up validation.
-    """
-    labels = []
-    result = {}
-    for line in record:
-        try:
-            key, val = splitter(line.rstrip())
-        except:
-            if strict:
-                raise RecordError(
-                    "Failed to extract key and value from line %s" %
-                    line)
-            else:
-                continue  # just skip the line if not strict
-
-        if key in result:
-            result[key].append(val)
-        else:
-            result[key] = [val]
-            labels.append(key)
-    return result, labels
-
-
-def _is_clustal_seq_line(line):
-    """Returns True if line starts with a non-blank character but not 'CLUSTAL'
-
-    Useful for filtering other lines out of the file.
-    """
-    return line and (not line[0].isspace()) and\
-        (not line.startswith('CLUSTAL')) and (not line.startswith('MUSCLE'))
-
-last_space = DelimitedSplitter(None, -1)
-
-
-def _delete_trailing_number(line):
-    """Deletes trailing number from a line.
-
-    WARNING: does not preserve internal whitespace when a number is removed!
-    (converts each whitespace run to a single space). Returns the original
-    line if it didn't end in a number.
-    """
-    pieces = line.split()
-    try:
-        int(pieces[-1])
-        return ' '.join(pieces[:-1])
-    except ValueError:  # no trailing numbers
-        return line
-
-
-def write_clustal(records, fh):
-    warnings.warn(
-        "write_clustal is deprecated and will be removed in "
-        "scikit-bio 0.3.0. Please update your code to use Alignment.write.",
-        DeprecationWarning)
-    clen = 60
-    records = list(records)
-    names, seqs = zip(*records)
-    nameLen = max(map(len, names))
-    seqLen = max(map(len, seqs))
-    fh.write('CLUSTAL\n\n')
-    for i in range(0, seqLen, clen):
-        for label, seq in records:
-            name = ('{:<%d}' % (nameLen)).format(label)
-            fh.write("%s\t%s\t\n" % (name, seq[i:i+clen]))
-        fh.write("\n")
-
-
-def parse_clustal(record, strict=True):
-    warnings.warn(
-        "parse_clustal is deprecated and will be removed in "
-        "scikit-bio 0.3.0. Please update your code to use Alignment.read.",
-        DeprecationWarning)
-
-    records = map(_delete_trailing_number,
-                  filter(_is_clustal_seq_line, record))
-    data, labels = _label_line_parser(records, last_space, strict)
-
-    for key in labels:
-        yield key, ''.join(data[key])
diff --git a/skbio/parse/sequences/factory.py b/skbio/parse/sequences/factory.py
deleted file mode 100644
index 16e4667..0000000
--- a/skbio/parse/sequences/factory.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-import os
-from gzip import open as gzip_open
-from itertools import chain
-
-from .iterator import FastaIterator, FastqIterator
-
-
-FILEEXT_MAP = {'fna': (FastaIterator, open),
-               'fna.gz': (FastaIterator, gzip_open),
-               'fasta': (FastaIterator, open),
-               'fasta.gz': (FastaIterator, gzip_open),
-               'qual': (FastaIterator, open),
-               'qual.gz': (FastaIterator, gzip_open),
-               'fastq': (FastqIterator, open),
-               'fastq.gz': (FastqIterator, gzip_open),
-               'fq': (FastqIterator, open),
-               'fq.gz': (FastqIterator, gzip_open)}
-
-
-def _determine_types_and_openers(files):
-    """Attempt to determine the appropriate iterators and openers"""
-    if files is None:
-        return [], []
-
-    iters = []
-    openers = []
-    for fpath in files:
-        if fpath.endswith('.gz'):
-            ext = '.'.join(fpath.rsplit('.', 2)[-2:])
-        else:
-            ext = fpath.rsplit('.', 1)[-1]
-
-        i, o = FILEEXT_MAP.get(ext, (None, None))
-        if i is None:
-            raise IOError("Unknown filetype for %s" % fpath)
-
-        iters.append(i)
-        openers.append(o)
-
-    return iters, openers
-
-
-def _is_single_iterator_type(iters):
-    """Determine if there is a single or multiple type of iterator
-
-    If iters is [], this method returns True, as it considers the null case to
-    be a single iterator type.
-    """
-    if iters:
-        return len(set(iters)) == 1
-    else:
-        return True
-
-
-def _open_or_none(opener, f):
-    """Open a file or returns None"""
-    if not opener:
-        return None
-    else:
-        name = opener.__name__
-
-        if not os.path.exists(f):
-            raise IOError("%s does not appear to exist!" % f)
-        try:
-            opened = opener(f)
-        except IOError:
-            raise IOError("Could not open %s with %s!" % (f, name))
-
-        return opened
-
-
-def load(seqs, qual=None, constructor=None, **kwargs):
-    """Construct the appropriate iterator for all your processing needs
-
-    This method will attempt to open all files correctly and to feed the
-    appropriate objects into the correct iterators.
-
-    Seqs can list multiple types of files (e.g., FASTA and FASTQ), but if
-    multiple file types are specified, qual must be None
-
-    Parameters
-    ----------
-    seqs : str or list of sequence file paths
-    qual : str or list of qual file paths or None
-    constructor : force a constructor on seqs
-    kwargs : dict
-        passed into the subsequent generators.
-
-    Returns
-    -------
-    SequenceIterator
-        the return is ``Iterable``
-
-    See Also
-    --------
-    SequenceIterator
-    FastaIterator
-    FastqIterator
-
-    """
-    if not seqs:
-        raise ValueError("Must supply sequences.")
-
-    if isinstance(seqs, str):
-        seqs = [seqs]
-
-    if isinstance(qual, str):
-        qual = [qual]
-
-    # i -> iters, o -> openers
-    if constructor is not None:
-        i_seqs = [constructor] * len(seqs)
-        o_seqs = [open] * len(seqs)
-    else:
-        i_seqs, o_seqs = _determine_types_and_openers(seqs)
-
-    i_qual, o_qual = _determine_types_and_openers(qual)
-
-    seqs = [_open_or_none(o, f) for f, o in zip(seqs, o_seqs)]
-    qual = [_open_or_none(o, f) for f, o in zip(qual or [], o_qual or [])]
-
-    if not qual:
-        qual = None
-
-    if not _is_single_iterator_type(i_seqs) and qual is not None:
-        # chaining Fasta/Fastq for sequence is easy, but it gets nasty quick
-        # if seqs is a mix of fasta/fastq, with qual coming in as there aren't
-        # 1-1 mappings. This could be addressed if necessary, but seems like
-        # an unnecessary block of code right now
-        raise ValueError("Cannot handle multiple sequence file types and qual "
-                         "file(s) at the same time.")
-
-    if _is_single_iterator_type(i_seqs):
-        seqs_constructor = i_seqs[0]
-        gen = seqs_constructor(seq=seqs, qual=qual, **kwargs)
-    else:
-        gen = chain(*[c(seq=[fp], **kwargs) for c, fp in zip(i_seqs, seqs)])
-
-    return gen
diff --git a/skbio/parse/sequences/fasta.py b/skbio/parse/sequences/fasta.py
deleted file mode 100644
index 747fc7d..0000000
--- a/skbio/parse/sequences/fasta.py
+++ /dev/null
@@ -1,240 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-from __future__ import absolute_import, division, print_function
-
-import warnings
-
-import numpy as np
-
-from skbio.io import RecordError
-from skbio.parse.record_finder import LabeledRecordFinder
-
-
-def is_fasta_label(x):
-    """Checks if x looks like a FASTA label line."""
-    return x.startswith('>')
-
-
-def is_blank_or_comment(x):
-    """Checks if x is blank or a FASTA comment line."""
-    return (not x) or x.startswith('#') or x.isspace()
-
-
-FastaFinder = LabeledRecordFinder(is_fasta_label, ignore=is_blank_or_comment)
-
-
-def parse_fasta(infile, strict=True, label_to_name=None, finder=FastaFinder,
-                label_characters='>', ignore_comment=False):
-    r"""Generator of labels and sequences from a fasta file.
-
-    .. note:: Deprecated in scikit-bio 0.2.0-dev
-       ``parse_fasta`` will be removed in scikit-bio 0.3.0. It is replaced by
-       ``read``, which is a more general method for deserializing
-       FASTA-formatted files. ``read`` supports multiple file formats,
-       automatic file format detection, etc. by taking advantage of
-       scikit-bio's I/O registry system. See :mod:`skbio.io` for more details.
-
-    Parameters
-    ----------
-    infile : open file object or str
-        An open fasta file or a path to a fasta file.
-
-    strict : bool
-        If ``True`` a ``RecordError`` will be raised if there is a fasta label
-        line with no associated sequence, or a sequence with no associated
-        label line (in other words, if there is a partial record). If
-        ``False``, partial records will be skipped.
-
-    label_to_name : function
-        A function to apply to the sequence label (i.e., text on the header
-        line) before yielding it. By default, the sequence label is returned
-        with no processing. This function must take a single string as input
-        and return a single string as output.
-
-    finder : function
-        The function to apply to find records in the fasta file. In general
-        you should not have to change this.
-
-    label_characters : str
-        String used to indicate the beginning of a new record. In general you
-        should not have to change this.
-
-    ignore_comment : bool
-        If `True`, split the sequence label on spaces, and return the label
-        only as the first space separated field (i.e., the sequence
-        identifier). Note: if both ``ignore_comment`` and ``label_to_name`` are
-        passed, ``ignore_comment`` is ignored (both operate on the label, so
-        there is potential for things to get messy otherwise).
-
-    Returns
-    -------
-    two-item tuple of str
-        yields the label and sequence for each entry.
-
-    Raises
-    ------
-    RecordError
-        If ``strict == True``, raises a ``RecordError`` if there is a fasta
-        label line with no associated sequence, or a sequence with no
-        associated label line (in other words, if there is a partial record).
-
-    Examples
-    --------
-    Assume we have a fasta-formatted file with the following contents::
-
-        >seq1 db-accession-149855
-        CGATGTCGATCGATCGATCGATCAG
-        >seq2 db-accession-34989
-        CATCGATCGATCGATGCATGCATGCATG
-
-    >>> from StringIO import StringIO
-    >>> fasta_f = StringIO('>seq1 db-accession-149855\n'
-    ...                    'CGATGTCGATCGATCGATCGATCAG\n'
-    ...                    '>seq2 db-accession-34989\n'
-    ...                    'CATCGATCGATCGATGCATGCATGCATG\n')
-
-    We can parse this as follows:
-
-    >>> from skbio.parse.sequences import parse_fasta
-    >>> for label, seq in parse_fasta(fasta_f):
-    ...     print(label, seq)
-    seq1 db-accession-149855 CGATGTCGATCGATCGATCGATCAG
-    seq2 db-accession-34989 CATCGATCGATCGATGCATGCATGCATG
-
-    The sequence label or header line in a fasta file is defined as containing
-    two separate pieces of information, delimited by a space. The first space-
-    separated entry is the sequence identifier, and everything following the
-    first space is considered additional information (e.g., comments about the
-    source of the sequence or the molecule that it encodes). Often we don't
-    care about that information within our code. If you want to just return the
-    sequence identifier from that line, you can pass ``ignore_comment=True``:
-
-    >>> from StringIO import StringIO
-    >>> fasta_f = StringIO('>seq1 db-accession-149855\n'
-    ...                    'CGATGTCGATCGATCGATCGATCAG\n'
-    ...                    '>seq2 db-accession-34989\n'
-    ...                    'CATCGATCGATCGATGCATGCATGCATG\n')
-
-    >>> from skbio.parse.sequences import parse_fasta
-    >>> for label, seq in parse_fasta(fasta_f, ignore_comment=True):
-    ...     print(label, seq)
-    seq1 CGATGTCGATCGATCGATCGATCAG
-    seq2 CATCGATCGATCGATGCATGCATGCATG
-
-    """
-    warnings.warn(
-        "`parse_fasta` is deprecated and will be removed in scikit-bio 0.3.0. "
-        "Please update your code to use `skbio.io.read(fh, format='fasta')` "
-        "to obtain a generator of `BiologicalSequence` objects (or "
-        "subclasses, see the `constructor` parameter).", DeprecationWarning)
-
-    for rec in finder(infile):
-        # first line must be a label line
-        if not rec[0][0] in label_characters:
-            if strict:
-                raise RecordError(
-                    "Found Fasta record without label line: %s" % rec)
-            else:
-                continue
-        # record must have at least one sequence
-        if len(rec) < 2:
-            if strict:
-                raise RecordError(
-                    "Found label line without sequences: %s" % rec)
-            else:
-                continue
-
-        # remove the label character from the beginning of the label
-        label = rec[0][1:].strip()
-        # if the user passed a label_to_name function, apply that to the label
-        if label_to_name is not None:
-            label = label_to_name(label)
-        # otherwise, if the user passed ignore_comment, split the label on
-        # spaces, and return the first space separated field (i.e., the
-        # sequence identifier)
-        elif ignore_comment:
-            label = label.split()[0]
-        else:
-            pass
-
-        # join the sequence lines into a single string
-        seq = ''.join(rec[1:])
-
-        yield label, seq
-
-
-def parse_qual(infile, full_header=False):
-    r"""yields label and qual from a qual file.
-
-    .. note:: Deprecated in scikit-bio 0.2.0-dev
-       ``parse_qual`` will be removed in scikit-bio 0.3.0. It is replaced by
-       ``read``, which is a more general method for deserializing
-       FASTA/QUAL-formatted files. ``read`` supports multiple file formats,
-       automatic file format detection, etc. by taking advantage of
-       scikit-bio's I/O registry system. See :mod:`skbio.io` for more details.
-
-    Parameters
-    ----------
-    infile : open file object or str
-        An open fasta file or path to it.
-
-    full_header : bool
-        Return the full header or just the id
-
-    Returns
-    -------
-    label : str
-        The quality label
-    qual : array
-        The quality at each position
-
-    Examples
-    --------
-    Assume we have a qual formatted file with the following contents::
-
-        >seq1
-        10 20 30 40
-        >seq2
-        1 2 3 4
-
-    >>> from StringIO import StringIO
-    >>> from skbio.parse.sequences import parse_qual
-    >>> qual_f = StringIO('>seq1\n'
-    ...                   '10 20 30 40\n'
-    ...                   '>seq2\n'
-    ...                   '1 2 3 4\n')
-    >>> for label, qual in parse_qual(qual_f):
-    ...     print(label)
-    ...     print(qual)
-    seq1
-    [10 20 30 40]
-    seq2
-    [1 2 3 4]
-
-    """
-    warnings.warn(
-        "`parse_qual` is deprecated and will be removed in scikit-bio 0.3.0. "
-        "Please update your code to use "
-        "`skbio.io.read(fasta_fh, qual=qual_fh, format='fasta')` to obtain a "
-        "generator of `BiologicalSequence` objects (or subclasses, see the "
-        "`constructor` parameter) with quality scores.", DeprecationWarning)
-
-    for rec in FastaFinder(infile):
-        curr_id = rec[0][1:]
-        curr_qual = ' '.join(rec[1:])
-        try:
-            parts = np.asarray(curr_qual.split(), dtype=int)
-        except ValueError:
-            raise RecordError(
-                "Invalid qual file. Check the format of the qual file: each "
-                "quality score must be convertible to an integer.")
-        if full_header:
-            curr_pid = curr_id
-        else:
-            curr_pid = curr_id.split()[0]
-        yield (curr_pid, parts)
diff --git a/skbio/parse/sequences/fastq.py b/skbio/parse/sequences/fastq.py
deleted file mode 100644
index 4d2736d..0000000
--- a/skbio/parse/sequences/fastq.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-from __future__ import absolute_import, division, print_function
-from future.standard_library import hooks
-
-import warnings
-
-import numpy as np
-
-from skbio.io.util import open_file
-from ._exception import FastqParseError
-
-with hooks():
-    from itertools import zip_longest
-
-
-def _ascii_to_phred(s, offset):
-    """Convert ascii to Phred quality score with specified ASCII offset."""
-    return np.fromstring(s, dtype='|S1').view(np.int8) - offset
-
-
-def ascii_to_phred33(s):
-    """Convert ascii string to Phred quality score with ASCII offset of 33.
-
-    Standard "Sanger" ASCII offset of 33. This is used by Illumina in CASAVA
-    versions after 1.8.0, and most other places. Note that internal Illumina
-    files still use offset of 64
-    """
-    return _ascii_to_phred(s, 33)
-
-
-def ascii_to_phred64(s):
-    """Convert ascii string to Phred quality score with ASCII offset of 64.
-
-    Illumina-specific ASCII offset of 64. This is used by Illumina in CASAVA
-    versions prior to 1.8.0, and in Illumina internal formats (e.g.,
-    export.txt files).
-    """
-    return _ascii_to_phred(s, 64)
-
-
-def _drop_id_marker(s):
-    """Drop the first character and decode bytes to text"""
-    id_ = s[1:]
-    try:
-        return str(id_.decode('utf-8'))
-    except AttributeError:
-        return id_
-
-
-def parse_fastq(data, strict=False, enforce_qual_range=True, phred_offset=33):
-    r"""yields label, seq, and qual from a fastq file.
-
-    .. note:: Deprecated in scikit-bio 0.2.0-dev
-       ``parse_fastq`` will be removed in scikit-bio 0.3.0. It is replaced by
-       ``read``, which is a more general method for deserializing
-       FASTQ-formatted files. ``read`` supports multiple file formats,
-       automatic file format detection, etc. by taking advantage of
-       scikit-bio's I/O registry system. See :mod:`skbio.io` for more details.
-
-    Parameters
-    ----------
-    data : open file object or str
-        An open fastq file (opened in binary mode) or a path to it.
-    strict : bool, optional
-        Defaults to ``False``. If strict is true a FastqParse error will be
-        raised if the seq and qual labels dont' match.
-    enforce_qual_range : bool, optional
-        Defaults to ``True``. If ``True``, an exception will be raised if a
-        quality score outside the range [0, 62] is detected
-    phred_offset : {33, 64}, optional
-        What Phred offset to use when converting qual score symbols to integers
-
-    Returns
-    -------
-    label, seq, qual : (str, bytes, np.array)
-        yields the label, sequence and quality for each entry
-
-    Examples
-    --------
-    Assume we have a fastq formatted file with the following contents::
-
-        @seq1
-        AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
-        +
-        ````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF
-        @seq2
-        TATGTATATATAACATATACATATATACATACATA
-        +
-        ]KZ[PY]_[YY^```ac^\\`bT``c`\aT``bbb
-
-    We can use the following code:
-
-    >>> from StringIO import StringIO
-    >>> from skbio.parse.sequences import parse_fastq
-    >>> fastq_f = StringIO('@seq1\n'
-    ...                     'AACACCAAACTTCTCCACCACGTGAGCTACAAAAG\n'
-    ...                     '+\n'
-    ...                     '````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF\n'
-    ...                     '@seq2\n'
-    ...                     'TATGTATATATAACATATACATATATACATACATA\n'
-    ...                     '+\n'
-    ...                     ']KZ[PY]_[YY^```ac^\\\`bT``c`\\aT``bbb\n')
-    >>> for label, seq, qual in parse_fastq(fastq_f, phred_offset=64):
-    ...     print(label)
-    ...     print(seq)
-    ...     print(qual)
-    seq1
-    AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
-    [32 32 32 32 25 30 20 29 32 29 35 30 35 33 34 35 33 35 35 32 30 12 34 30 35
-     35 25 20 28 20 28 25 28 23  6]
-    seq2
-    TATGTATATATAACATATACATATATACATACATA
-    [29 11 26 27 16 25 29 31 27 25 25 30 32 32 32 33 35 30 28 28 32 34 20 32 32
-     35 32 28 33 20 32 32 34 34 34]
-    """
-    warnings.warn(
-        "`parse_fastq` is deprecated and will be removed in scikit-bio 0.3.0. "
-        "Please update your code to use `skbio.io.read(fh, format='fastq')` "
-        "to obtain a generator of `BiologicalSequence` objects (or "
-        "subclasses, see the `constructor` parameter).", DeprecationWarning)
-
-    if phred_offset == 33:
-        phred_f = ascii_to_phred33
-    elif phred_offset == 64:
-        phred_f = ascii_to_phred64
-    else:
-        raise ValueError("Unknown PHRED offset of %s" % phred_offset)
-
-    with open_file(data, 'rb') as data:
-        iters = [iter(data)] * 4
-        for seqid, seq, qualid, qual in zip_longest(*iters):
-            seqid = seqid.strip()
-            # If the file simply ended in a blankline, do not error
-            if seqid is '':
-                continue
-            # Error if an incomplete record is found
-            # Note: seqid cannot be None, because if all 4 values were None,
-            # then the loop condition would be false, and we could not have
-            # gotten to this point
-            if seq is None or qualid is None or qual is None:
-                raise FastqParseError("Incomplete FASTQ record found at end "
-                                      "of file")
-
-            seq = seq.strip()
-            qualid = qualid.strip()
-            qual = qual.strip()
-
-            seqid = _drop_id_marker(seqid)
-
-            try:
-                seq = str(seq.decode("utf-8"))
-            except AttributeError:
-                pass
-
-            qualid = _drop_id_marker(qualid)
-            if strict:
-                if seqid != qualid:
-                    raise FastqParseError('ID mismatch: {} != {}'.format(
-                        seqid, qualid))
-
-            # bounds based on illumina limits, see:
-            # http://nar.oxfordjournals.org/content/38/6/1767/T1.expansion.html
-            qual = phred_f(qual)
-            if enforce_qual_range and ((qual < 0).any() or (qual > 62).any()):
-                raise FastqParseError("Failed qual conversion for seq id: %s. "
-                                      "This may be because you passed an "
-                                      "incorrect value for phred_offset." %
-                                      seqid)
-
-            yield (seqid, seq, qual)
diff --git a/skbio/parse/sequences/iterator.py b/skbio/parse/sequences/iterator.py
deleted file mode 100644
index 6dd4cb1..0000000
--- a/skbio/parse/sequences/iterator.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from itertools import chain
-
-from future.builtins import zip
-
-from skbio.workflow import Workflow, not_none, method, requires
-from .fasta import parse_fasta, parse_qual
-from .fastq import parse_fastq
-
-
-def _has_qual(item):
-    """Return True if it appears that there is qual data"""
-    return (item['QualID'] is not None) and (item['Qual'] is not None)
-
-
-class SequenceIterator(Workflow):
-    """Provide a standard API for interacting with sequence data
-
-    Provide a common interface for iterating over sequence data, including
-    support for quality scores and transforms.
-
-    A transform method is a function that takes the state dict and modifies it
-    in place. For instance, to reverse sequences, you could pass in the
-    following function:
-
-    >>> def reverse(st):
-    ...    st['Sequence']= st['Sequence'][::-1]
-    ...    st['Qual'] = st['Qual'][::-1] if st['Qual'] is not None else None
-
-    as ``transform``. The primary intention is to support reverse complementing
-    of sequences.
-
-    All subclasses of this object are expected to update the following in
-    ``state``:
-
-        * SequenceID : str, the sequence identifier
-        * Sequence   : str, the sequence itself
-        * QualID     : str or None, the quality ID (for completeness)
-        * Qual       : np.array or None, the quality scores
-
-    ``state`` is preallocated a single time to avoid repetitive allocations.
-    What this means is that the object being yielded is updated in place. If
-    an individual record needs to be tracked over time, then it is recommended
-    that copies of the yielded data are made.
-
-    *WARNING*: The yielded obj is not safe for use with Python 2.7's builtin
-    `zip` method as the state is updated in place.
-
-    Parameters
-    ----------
-
-    seq : list of open file-like objects
-    qual : list of open file-like objects or None
-    transform : function or None
-        If provided, this function will be passed ``state``
-    valid_id : bool
-        If true, verify sequence and qual IDs are identical (if relevant)
-    valid_length : bool
-        If true, verify the length of the sequence and qual are the same
-        (if relevant)
-
-    Attributes
-    ----------
-
-    seq
-    qual
-    state
-    options
-
-    """
-    def __init__(self, seq, qual=None, transform=None, valid_id=True,
-                 valid_length=True, **kwargs):
-        self.seq = seq
-        self.qual = qual
-
-        self._transform = transform
-
-        state = {'SequenceID': None,
-                 'Sequence': None,
-                 'QualID': None,
-                 'Qual': None}
-
-        options = {'transform': self._transform,
-                   'valid_id': valid_id,
-                   'valid_length': valid_length}
-
-        if self.seq is None:
-            raise ValueError("SequenceIterator requires sequences!")
-
-        super(SequenceIterator, self).__init__(state, options=options)
-
-    def _gen(self):
-        """Yield a populated record"""
-        raise NotImplementedError("Must be implemented by subclass")
-
-    def __call__(self):
-        return super(SequenceIterator, self).__call__(self._gen())
-
-    def __iter__(self):
-        return self()
-
-    def initialize_state(self, item):
-        """Do nothing here as the subclassed iterators update state directly"""
-        pass
-
-    @method(priority=100)
-    @requires(option='valid_id', values=True, state=_has_qual)
-    def validate_ids(self):
-        self.failed = self.state['SequenceID'] != self.state['QualID']
-
-    @method(priority=90)
-    @requires(option='valid_length', values=True, state=_has_qual)
-    def valid_lengths(self):
-        self.failed = len(self.state['Sequence']) != len(self.state['Qual'])
-
-    @method(priority=80)
-    @requires(option='transform', values=not_none)
-    def transform(self):
-        """Transform state if necessary"""
-        self._transform(self.state)
-
-
-class FastaIterator(SequenceIterator):
-    """Populate state based on fasta sequence and qual (if provided)"""
-    def _gen(self):
-        """Construct internal iterators"""
-        # construct fasta generators
-        fasta_gens = chain(*[parse_fasta(f) for f in self.seq])
-
-        # construct qual generators if necessary
-        if self.qual is not None:
-            qual_gens = chain(*[parse_qual(f) for f in self.qual])
-        else:
-            qual_gens = None
-
-        # determine which specific generator to return
-        if qual_gens is None:
-            gen = self._fasta_gen(fasta_gens)
-        else:
-            gen = self._fasta_qual_gen(fasta_gens, qual_gens)
-
-        return gen
-
-    def _fasta_gen(self, fasta_gens):
-        """Yield fasta data"""
-        _iter = fasta_gens
-        for (seq_id, seq) in _iter:
-            self.state['SequenceID'] = seq_id
-            self.state['Sequence'] = seq
-
-            # as we're updating state in place and effectively circumventing
-            # Workflow.initialize_state, we do not need to yield anything
-            yield None
-
-    def _fasta_qual_gen(self, fasta_gen, qual_gen):
-        """Yield fasta and qual together"""
-        _iter = zip(fasta_gen, qual_gen)
-        for (seq_id, seq), (qual_id, qual) in _iter:
-            self.state['SequenceID'] = seq_id
-            self.state['Sequence'] = seq
-            self.state['QualID'] = qual_id
-            self.state['Qual'] = qual
-
-            # as we're updating state in place and effectively circumventing
-            # Workflow.initialize_state, we do not need to yield anything
-            yield None
-
-
-class FastqIterator(SequenceIterator):
-    """Populate state based on fastq sequence
-
-    Note: thq 'qual' keyword argument is ignored by this object.
-    """
-    def __init__(self, *args, **kwargs):
-        if 'phred_offset' in kwargs:
-            self._fpo = kwargs.pop('phred_offset')
-        else:
-            # force to an offset of 33
-            self._fpo = 33
-
-        super(FastqIterator, self).__init__(*args, **kwargs)
-
-    def _gen(self):
-        """Construct internal iterators"""
-        fastq_gens = chain(*[parse_fastq(f, phred_offset=self._fpo)
-                             for f in self.seq])
-        return self._fastq_gen(fastq_gens)
-
-    def _fastq_gen(self, fastq_gens):
-        """Yield fastq data"""
-        for (seq_id, seq, qual) in fastq_gens:
-            self.state['SequenceID'] = seq_id
-            self.state['Sequence'] = seq
-            self.state['QualID'] = seq_id
-            self.state['Qual'] = qual
-
-            # as we're updating state in place and effectively circumventing
-            # Workflow.initialize_state, we do not need to yield anything
-            yield None
diff --git a/skbio/parse/sequences/tests/__init__.py b/skbio/parse/sequences/tests/__init__.py
deleted file mode 100644
index 774824a..0000000
--- a/skbio/parse/sequences/tests/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-# -----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
diff --git a/skbio/parse/sequences/tests/data/fna1.fasta b/skbio/parse/sequences/tests/data/fna1.fasta
deleted file mode 100644
index 40db637..0000000
--- a/skbio/parse/sequences/tests/data/fna1.fasta
+++ /dev/null
@@ -1,4 +0,0 @@
->s1
-ATGC
->s2
-AATTGG
diff --git a/skbio/parse/sequences/tests/data/fna1.fna.gz b/skbio/parse/sequences/tests/data/fna1.fna.gz
deleted file mode 100644
index 79b4f8b..0000000
Binary files a/skbio/parse/sequences/tests/data/fna1.fna.gz and /dev/null differ
diff --git a/skbio/parse/sequences/tests/data/fna1.qual b/skbio/parse/sequences/tests/data/fna1.qual
deleted file mode 100644
index ece3e81..0000000
--- a/skbio/parse/sequences/tests/data/fna1.qual
+++ /dev/null
@@ -1,4 +0,0 @@
->s1
-10 10 10 10
->s2
-10 20 30 40 30 20
diff --git a/skbio/parse/sequences/tests/data/fq1.fastq.gz b/skbio/parse/sequences/tests/data/fq1.fastq.gz
deleted file mode 100644
index e4ed599..0000000
Binary files a/skbio/parse/sequences/tests/data/fq1.fastq.gz and /dev/null differ
diff --git a/skbio/parse/sequences/tests/data/fq1.fq b/skbio/parse/sequences/tests/data/fq1.fq
deleted file mode 100644
index a15ea63..0000000
--- a/skbio/parse/sequences/tests/data/fq1.fq
+++ /dev/null
@@ -1,8 +0,0 @@
- at s1
-ATGC
-+
-hhhh
- at s2
-AATTGG
-+
-gggghh
diff --git a/skbio/parse/sequences/tests/data/noextensionfasta b/skbio/parse/sequences/tests/data/noextensionfasta
deleted file mode 100644
index d262c0f..0000000
--- a/skbio/parse/sequences/tests/data/noextensionfasta
+++ /dev/null
@@ -1,4 +0,0 @@
->seq1
-AATTGG
->seq2
-ATATA
diff --git a/skbio/parse/sequences/tests/data/qs1.qseq.gz b/skbio/parse/sequences/tests/data/qs1.qseq.gz
deleted file mode 100644
index c97ed7a..0000000
Binary files a/skbio/parse/sequences/tests/data/qs1.qseq.gz and /dev/null differ
diff --git a/skbio/parse/sequences/tests/test_clustal.py b/skbio/parse/sequences/tests/test_clustal.py
deleted file mode 100644
index b2a9c7e..0000000
--- a/skbio/parse/sequences/tests/test_clustal.py
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/usr/bin/env python
-# -----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-from __future__ import absolute_import, division, print_function
-
-from unittest import TestCase, main
-
-from skbio.parse.sequences import parse_clustal, write_clustal
-from skbio.parse.sequences.clustal import (_is_clustal_seq_line, last_space,
-                                           _delete_trailing_number)
-from skbio.io import RecordError
-
-
-class ClustalTests(TestCase):
-
-    """Tests of top-level functions."""
-
-    def test_is_clustal_seq_line(self):
-        """_is_clustal_seq_line should reject blanks and 'CLUSTAL'"""
-        ic = _is_clustal_seq_line
-        assert ic('abc')
-        assert ic('abc  def')
-        assert not ic('CLUSTAL')
-        assert not ic('CLUSTAL W fsdhicjkjsdk')
-        assert not ic('  *   *')
-        assert not ic(' abc def')
-        assert not ic('MUSCLE (3.41) multiple sequence alignment')
-
-    def test_last_space(self):
-        """last_space should split on last whitespace"""
-        self.assertEqual(last_space('a\t\t\t  b    c'), ['a b', 'c'])
-        self.assertEqual(last_space('xyz'), ['xyz'])
-        self.assertEqual(last_space('  a b'), ['a', 'b'])
-
-    def test_delete_trailing_number(self):
-        """Should delete the trailing number if present"""
-        dtn = _delete_trailing_number
-        self.assertEqual(dtn('abc'), 'abc')
-        self.assertEqual(dtn('a b c'), 'a b c')
-        self.assertEqual(dtn('a \t  b  \t  c'), 'a \t  b  \t  c')
-        self.assertEqual(dtn('a b 3'), 'a b')
-        self.assertEqual(dtn('a b c \t 345'), 'a b c')
-
-
-class ClustalParserTests(TestCase):
-
-    """Tests of the parse_clustal function"""
-
-    def test_null(self):
-        """Should return empty dict and list on null input"""
-        result = parse_clustal([])
-        self.assertEqual(dict(result), {})
-
-    def test_minimal(self):
-        """Should handle single-line input correctly"""
-        result = parse_clustal([MINIMAL])  # expects seq of lines
-        self.assertEqual(dict(result), {'abc': 'ucag'})
-
-    def test_two(self):
-        """Should handle two-sequence input correctly"""
-        result = parse_clustal(TWO)
-        self.assertEqual(dict(result), {'abc': 'uuuaaa', 'def': 'cccggg'})
-
-    def test_real(self):
-        """Should handle real Clustal output"""
-        data = parse_clustal(REAL)
-        self.assertEqual(dict(data), {
-            'abc':
-            'GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA'
-            'GUCGAUACGUACGUCAGUCAGUACGUCAGCAUGCAUACGUACGUCGUACGUACGU-CGAC'
-            'UGACUAGUCAGCUAGCAUCGAUCAGU',
-            'def':
-            '------------------------------------------------------------'
-            '-----------------------------------------CGCGAUGCAUGCAU-CGAU'
-            'CGAUCAGUCAGUCGAU----------',
-            'xyz':
-            '------------------------------------------------------------'
-            '-------------------------------------CAUGCAUCGUACGUACGCAUGAC'
-            'UGCUGCAUCA----------------'
-        })
-
-    def test_bad(self):
-        """Should reject bad data if strict"""
-        result = parse_clustal(BAD, strict=False)
-        self.assertEqual(dict(result), {})
-        # should fail unless we turned strict processing off
-        with self.assertRaises(RecordError):
-            dict(parse_clustal(BAD))
-
-    def test_space_labels(self):
-        """Should tolerate spaces in labels"""
-        result = parse_clustal(SPACE_LABELS)
-        self.assertEqual(dict(result), {'abc': 'uca', 'def ggg': 'ccc'})
-
-    def test_write(self):
-        """Should write real Clustal output"""
-        import os
-        fname = "test.aln"
-        testfile = open(fname, 'w')
-        seqs = [('abc',
-                 'GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA'
-                 'GUCGAUACGUACGUCAGUCAGUACGUCAGCAUGCAUACGUACGUCGUACGUACGU-CGAC'
-                 'UGACUAGUCAGCUAGCAUCGAUCAGU'),
-                ('def',
-                 '------------------------------------------------------------'
-                 '-----------------------------------------CGCGAUGCAUGCAU-CGAU'
-                 'CGAUCAGUCAGUCGAU----------'),
-                ('xyz',
-                 '------------------------------------------------------------'
-                 '-------------------------------------CAUGCAUCGUACGUACGCAUGAC'
-                 'UGCUGCAUCA----------------')]
-        records = (x for x in seqs)
-        write_clustal(records, testfile)
-        testfile.close()
-        raw = open(fname, 'r').read()
-        data = parse_clustal(raw.split('\n'))
-        data = list(data)
-        self.assertEqual(len(data), len(seqs))
-        self.assertEqual(set(data), set(seqs))
-        testfile.close()
-        os.remove(fname)
-
-MINIMAL = 'abc\tucag'
-TWO = 'abc\tuuu\ndef\tccc\n\n    ***\n\ndef ggg\nabc\taaa\n'.split('\n')
-
-REAL = """CLUSTAL W (1.82) multiple sequence alignment
-
-
-abc             GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA 60
-def             ------------------------------------------------------------
-xyz             ------------------------------------------------------------
-
-
-abc             GUCGAUACGUACGUCAGUCAGUACGUCAGCAUGCAUACGUACGUCGUACGUACGU-CGAC 11
-def             -----------------------------------------CGCGAUGCAUGCAU-CGAU 18
-xyz             -------------------------------------CAUGCAUCGUACGUACGCAUGAC 23
-                                                         *    * * * *    **
-
-abc             UGACUAGUCAGCUAGCAUCGAUCAGU 145
-def             CGAUCAGUCAGUCGAU---------- 34
-xyz             UGCUGCAUCA---------------- 33
-                *     ***""".split('\n')
-
-BAD = ['dshfjsdfhdfsj', 'hfsdjksdfhjsdf']
-
-SPACE_LABELS = ['abc uca', 'def ggg ccc']
-
-
-if __name__ == '__main__':
-    main()
diff --git a/skbio/parse/sequences/tests/test_factory.py b/skbio/parse/sequences/tests/test_factory.py
deleted file mode 100644
index 19f222f..0000000
--- a/skbio/parse/sequences/tests/test_factory.py
+++ /dev/null
@@ -1,201 +0,0 @@
-#!/usr/bin/env python
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from unittest import TestCase, main
-
-from numpy import array
-
-from skbio import FastaIterator
-from skbio.parse.sequences import load
-from skbio.parse.sequences.factory import (
-    _open_or_none, _is_single_iterator_type)
-from skbio.util import get_data_path
-
-
-class SequenceLoadTests(TestCase):
-    def setUp(self):
-        self.fna1 = get_data_path('fna1.fasta')
-        self.fna1gz = get_data_path('fna1.fna.gz')
-        self.fq1 = get_data_path('fq1.fq')
-        self.fq1gz = get_data_path('fq1.fastq.gz')
-        self.qual1 = get_data_path('fna1.qual')
-        self.noext = get_data_path('noextensionfasta')
-
-    def test_single_files(self):
-        """load should handle a single file, and can be gzipped"""
-        it = load(self.fna1)
-        obs = [rec.copy() for rec in it]
-        exp = [{'Sequence': 'ATGC', 'SequenceID': 's1',
-                'QualID': None, 'Qual': None},
-               {'Sequence': 'AATTGG', 'SequenceID': 's2',
-                'QualID': None, 'Qual': None}]
-        self.assertEqual(obs, exp)
-        it = load(self.fq1, phred_offset=64)
-        obs = [rec.copy() for rec in it]
-        exp = [{'Sequence': 'ATGC', 'SequenceID': 's1',
-                'QualID': 's1', 'Qual': array([40, 40, 40, 40])},
-               {'Sequence': 'AATTGG', 'SequenceID': 's2',
-                'QualID': 's2', 'Qual': array([39, 39, 39, 39, 40, 40])}]
-        for o, e in zip(obs, exp):
-            self.assertEqual(o['Sequence'], e['Sequence'])
-            self.assertEqual(o['SequenceID'], e['SequenceID'])
-            self.assertEqual(o['QualID'], e['QualID'])
-            self.assertTrue((o['Qual'] == e['Qual']).all())
-
-        it = load(self.fna1gz)
-        obs = [rec.copy() for rec in it]
-        exp = [{'Sequence': 'ATGC', 'SequenceID': 's1',
-                'QualID': None, 'Qual': None},
-               {'Sequence': 'AATTGG', 'SequenceID': 's2',
-                'QualID': None, 'Qual': None}]
-        self.assertEqual(obs, exp)
-
-        it = load(self.fq1gz, phred_offset=64)
-        obs = [rec.copy() for rec in it]
-        exp = [{'Sequence': 'ATGC', 'SequenceID': 's1',
-                'QualID': 's1', 'Qual': array([40, 40, 40, 40])},
-               {'Sequence': 'AATTGG', 'SequenceID': 's2',
-                'QualID': 's2', 'Qual': array([39, 39, 39, 39, 40, 40])}]
-        for o, e in zip(obs, exp):
-            self.assertEqual(o['Sequence'], e['Sequence'])
-            self.assertEqual(o['SequenceID'], e['SequenceID'])
-            self.assertEqual(o['QualID'], e['QualID'])
-            self.assertTrue((o['Qual'] == e['Qual']).all())
-
-    def test_multiple_files(self):
-        """load should handle multiple files of different types"""
-        it = load([self.fq1, self.fna1], phred_offset=64)
-        obs = [rec.copy() for rec in it]
-        exp = [{'Sequence': 'ATGC', 'SequenceID': 's1',
-                'QualID': 's1', 'Qual': array([40, 40, 40, 40])},
-               {'Sequence': 'AATTGG', 'SequenceID': 's2',
-                'QualID': 's2', 'Qual': array([39, 39, 39, 39, 40, 40])},
-               {'Sequence': 'ATGC', 'SequenceID': 's1',
-                'QualID': None, 'Qual': None},
-               {'Sequence': 'AATTGG', 'SequenceID': 's2',
-                'QualID': None, 'Qual': None}]
-
-        o = obs[0]
-        e = exp[0]
-        self.assertEqual(o['Sequence'], e['Sequence'])
-        self.assertEqual(o['SequenceID'], e['SequenceID'])
-        self.assertEqual(o['QualID'], e['QualID'])
-        self.assertTrue((o['Qual'] == e['Qual']).all())
-
-        o = obs[1]
-        e = exp[1]
-        self.assertEqual(o['Sequence'], e['Sequence'])
-        self.assertEqual(o['SequenceID'], e['SequenceID'])
-        self.assertEqual(o['QualID'], e['QualID'])
-        self.assertTrue((o['Qual'] == e['Qual']).all())
-
-        o = obs[2]
-        e = exp[2]
-        self.assertEqual(o['Sequence'], e['Sequence'])
-        self.assertEqual(o['SequenceID'], e['SequenceID'])
-        self.assertEqual(o['QualID'], e['QualID'])
-        self.assertEqual(o['Qual'], e['Qual'])
-
-        o = obs[3]
-        e = exp[3]
-        self.assertEqual(o['Sequence'], e['Sequence'])
-        self.assertEqual(o['SequenceID'], e['SequenceID'])
-        self.assertEqual(o['QualID'], e['QualID'])
-        self.assertEqual(o['Qual'], e['Qual'])
-
-    def test_transform(self):
-        """load should pass transform methods to the iterators"""
-        def rev_f(st):
-            st['Sequence'] = st['Sequence'][::-1]
-            st['Qual'] = st['Qual'][::-1] if st['Qual'] is not None else None
-
-        it = load([self.fq1gz, self.fna1], transform=rev_f, phred_offset=64)
-        obs = [rec.copy() for rec in it]
-        exp = [{'Sequence': 'CGTA', 'SequenceID': 's1',
-                'QualID': 's1', 'Qual': array([40, 40, 40, 40])},
-               {'Sequence': 'GGTTAA', 'SequenceID': 's2',
-                'QualID': 's2', 'Qual': array([40, 40, 39, 39, 39, 39])},
-               {'Sequence': 'CGTA', 'SequenceID': 's1',
-                'QualID': None, 'Qual': None},
-               {'Sequence': 'GGTTAA', 'SequenceID': 's2',
-                'QualID': None, 'Qual': None}]
-
-        o = obs[0]
-        e = exp[0]
-        self.assertEqual(o['Sequence'], e['Sequence'])
-        self.assertEqual(o['SequenceID'], e['SequenceID'])
-        self.assertEqual(o['QualID'], e['QualID'])
-        self.assertTrue((o['Qual'] == e['Qual']).all())
-
-        o = obs[1]
-        e = exp[1]
-        self.assertEqual(o['Sequence'], e['Sequence'])
-        self.assertEqual(o['SequenceID'], e['SequenceID'])
-        self.assertEqual(o['QualID'], e['QualID'])
-        self.assertTrue((o['Qual'] == e['Qual']).all())
-
-        o = obs[2]
-        e = exp[2]
-        self.assertEqual(o['Sequence'], e['Sequence'])
-        self.assertEqual(o['SequenceID'], e['SequenceID'])
-        self.assertEqual(o['QualID'], e['QualID'])
-        self.assertEqual(o['Qual'], e['Qual'])
-
-        o = obs[3]
-        e = exp[3]
-        self.assertEqual(o['Sequence'], e['Sequence'])
-        self.assertEqual(o['SequenceID'], e['SequenceID'])
-        self.assertEqual(o['QualID'], e['QualID'])
-        self.assertEqual(o['Qual'], e['Qual'])
-
-    def test_force_constructor(self):
-        it = load([self.noext], constructor=FastaIterator)
-        obs = [rec.copy() for rec in it]
-        exp = [{'Sequence': 'AATTGG', 'SequenceID': 'seq1',
-                'Qual': None, 'QualID': None},
-               {'Sequence': 'ATATA', 'SequenceID': 'seq2',
-                'Qual': None, 'QualID': None}]
-        self.assertEqual(obs, exp)
-
-    def test_no_seqs(self):
-        for null in ('', [], (), None):
-            with self.assertRaises(ValueError):
-                load(null)
-
-    def test_unknown_filetype(self):
-        with self.assertRaises(IOError):
-            load('seqs.mpeg')
-
-    def test_file_path_does_not_exist(self):
-        with self.assertRaises(IOError):
-            load('this-seqs-file-had-better-not-exist-or-this-test-will-'
-                 'fail.fna')
-
-    def test_multiple_types_fasta_fastq_qual(self):
-        with self.assertRaises(ValueError):
-            load([self.fna1, self.fq1], qual=self.qual1)
-
-    def test_open_or_none_no_opener(self):
-        obs = _open_or_none(None, self.fna1)
-        self.assertTrue(obs is None)
-
-    def test_open_or_none_opener_error(self):
-        def bogus_opener(f):
-            raise IOError('hahaha')
-
-        with self.assertRaises(IOError):
-            _open_or_none(bogus_opener, self.fna1)
-
-    def test_is_single_iterator_type_null_case(self):
-        self.assertTrue(_is_single_iterator_type([]))
-
-
-if __name__ == '__main__':
-    main()
diff --git a/skbio/parse/sequences/tests/test_fasta.py b/skbio/parse/sequences/tests/test_fasta.py
deleted file mode 100644
index 36a2196..0000000
--- a/skbio/parse/sequences/tests/test_fasta.py
+++ /dev/null
@@ -1,196 +0,0 @@
-#!/usr/bin/env python
-
-# -----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-from __future__ import absolute_import, division, print_function
-
-import tempfile
-from unittest import TestCase, main
-
-import numpy.testing as npt
-
-from skbio import parse_fasta, parse_qual
-from skbio.io import RecordError
-
-
-FASTA_PARSERS_DATA = {
-    'labels': '>abc\n>def\n>ghi\n',
-    'oneseq': '>abc\nUCAG\n',
-    'multiline': '>xyz\nUUUU\nCC\nAAAAA\nG',
-    'threeseq': '>123\na\n> \t abc  \t \ncag\ngac\n>456\nc\ng',
-    'twogood': '>123\n\n> \t abc  \t \ncag\ngac\n>456\nc\ng',
-    'oneX': '>123\nX\n> \t abc  \t \ncag\ngac\n>456\nc\ng',
-    'nolabels': 'GJ>DSJGSJDF\nSFHKLDFS>jkfs\n',
-    'empty': '',
-    'qualscores': '>x\n5 10 5\n12\n>y foo bar\n30 40\n>a   \n5 10 5\n12\n'
-                  '>b  baz\n30 40',
-    'invalidqual': '>x\n5 10 5\n12\n>y\n30 40\n>a\n5 10 5\n12 brofist 42'
-    }
-
-
-class IterableData(object):
-    """Store fasta data as lists of strings."""
-    def setUp(self):
-        for attr, val in FASTA_PARSERS_DATA.items():
-            setattr(self, attr, val.split('\n'))
-
-
-class FileData(object):
-    """Store fasta data as file names pointing to the data."""
-    def setUp(self):
-        tmp_files = []
-        for attr, val in FASTA_PARSERS_DATA.items():
-            tmp_file = tempfile.NamedTemporaryFile('r+')
-            tmp_file.write(val)
-            tmp_file.flush()
-            tmp_file.seek(0)
-            setattr(self, attr, tmp_file.name)
-            tmp_files.append(tmp_file)
-        self._tmp_files = tmp_files
-
-    def tearDown(self):
-        for tmp_file in self._tmp_files:
-            tmp_file.close()
-
-
-class ParseFastaTests(object):
-
-    """Tests of parse_fasta: returns (label, seq) tuples."""
-
-    def test_empty(self):
-        """parse_fasta should return empty list from 'file' w/o labels
-        """
-        self.assertEqual(list(parse_fasta(self.empty)), [])
-        self.assertEqual(list(parse_fasta(self.nolabels, strict=False)),
-                         [])
-        self.assertRaises(RecordError, list, parse_fasta(self.nolabels))
-
-    def test_no_labels(self):
-        """parse_fasta should return empty list from file w/o seqs"""
-        # should fail if strict (the default)
-        self.assertRaises(RecordError, list,
-                          parse_fasta(self.labels, strict=True))
-        # if not strict, should skip the records
-        self.assertEqual(list(parse_fasta(self.labels, strict=False)),
-                         [])
-
-    def test_single(self):
-        """parse_fasta should read single record as (label, seq) tuple
-        """
-        f = list(parse_fasta(self.oneseq))
-        self.assertEqual(len(f), 1)
-        a = f[0]
-        self.assertEqual(a, ('abc', 'UCAG'))
-
-        f = list(parse_fasta(self.multiline))
-        self.assertEqual(len(f), 1)
-        a = f[0]
-        self.assertEqual(a, ('xyz', 'UUUUCCAAAAAG'))
-
-    def test_gt_bracket_in_seq(self):
-        """parse_fasta handles alternate finder function
-
-            this test also illustrates how to use the parse_fasta
-            to handle "sequences" that start with a > symbol, which can
-            happen when we abuse the parse_fasta to parse
-            fasta-like sequence quality files.
-        """
-        oneseq_w_gt = '>abc\n>CAG\n'.split('\n')
-
-        def get_two_line_records(infile):
-            line1 = None
-            for line in infile:
-                if line1 is None:
-                    line1 = line
-                else:
-                    yield (line1, line)
-                    line1 = None
-        f = list(parse_fasta(oneseq_w_gt, finder=get_two_line_records))
-        self.assertEqual(len(f), 1)
-        a = f[0]
-        self.assertEqual(a, ('abc', '>CAG'))
-
-    def test_parse_fasta_ignore_comment(self):
-        """parse_fasta correct ignores label comments when requested
-        """
-        in_ = '>1\nCAG\n>2 some other info\nCCAG\n>3 \nA'.split('\n')
-        # ignore_comment = False
-        actual = list(parse_fasta(in_))
-        expected = [('1', 'CAG'), ('2 some other info', 'CCAG'), ('3', 'A')]
-        self.assertEqual(actual, expected)
-        # ignore_comment = True
-        actual = list(parse_fasta(in_, ignore_comment=True))
-        expected = [('1', 'CAG'), ('2', 'CCAG'), ('3', 'A')]
-        self.assertEqual(actual, expected)
-
-    def test_parse_fasta_label_to_name(self):
-        exp = [('brofist', 'a'), ('brofist', 'caggac'), ('brofist', 'cg')]
-
-        # the most powerful fasta label converter known to mankind
-        obs = list(parse_fasta(self.threeseq,
-                   label_to_name=lambda _: 'brofist'))
-
-        self.assertEqual(obs, exp)
-
-    def test_multiple(self):
-        """parse_fasta should read multiline records correctly"""
-        f = list(parse_fasta(self.threeseq))
-        self.assertEqual(len(f), 3)
-        a, b, c = f
-        self.assertEqual(a, ('123', 'a'))
-        self.assertEqual(b, ('abc', 'caggac'))
-        self.assertEqual(c, ('456', 'cg'))
-
-    def test_multiple_bad_strict(self):
-        with self.assertRaises(RecordError):
-            list(parse_fasta(self.twogood))
-
-    def test_multiple_bad_not_strict(self):
-        f = list(parse_fasta(self.twogood, strict=False))
-        self.assertEqual(len(f), 2)
-        a, b = f
-        self.assertEqual(a, ('abc', 'caggac'))
-
-    def test_parse_qual(self):
-        exp = [('x', [5, 10, 5, 12]), ('y', [30, 40]), ('a', [5, 10, 5, 12]),
-               ('b', [30, 40])]
-        obs = parse_qual(self.qualscores)
-
-        for o, e in zip(obs, exp):
-            npt.assert_equal(o, e)
-
-    def test_parse_qual_invalid_qual_file(self):
-        with self.assertRaises(RecordError):
-            list(parse_qual(self.invalidqual))
-
-    def test_parse_qual_full_header(self):
-        exp = [('x', [5, 10, 5, 12]), ('y foo bar', [30, 40]),
-               ('a', [5, 10, 5, 12]), ('b  baz', [30, 40])]
-        obs = parse_qual(self.qualscores, full_header=True)
-
-        for o, e in zip(obs, exp):
-            npt.assert_equal(o, e)
-
-
-class ParseFastaTestsInputIsIterable(IterableData, ParseFastaTests, TestCase):
-    """Mixin: `parse_fasta` and `parse_qual` in ParseFastaTests gets lists
-    of strings.
-
-    """
-    pass
-
-
-class ParseFastaTestsInputIsFileNames(FileData, ParseFastaTests, TestCase):
-    """Mixin: `parse_fasta` and `parse_qual` in ParseFastaTests gets a
-    file name.
-
-    """
-    pass
-
-if __name__ == "__main__":
-    main()
diff --git a/skbio/parse/sequences/tests/test_fastq.py b/skbio/parse/sequences/tests/test_fastq.py
deleted file mode 100644
index 0d9f74e..0000000
--- a/skbio/parse/sequences/tests/test_fastq.py
+++ /dev/null
@@ -1,223 +0,0 @@
-#!/usr/bin/env python
-# -----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-from __future__ import absolute_import, division, print_function
-
-from unittest import TestCase, main
-import tempfile
-
-from numpy import array
-
-from skbio import parse_fastq
-from skbio.parse.sequences import FastqParseError
-
-
-class IterableData(object):
-    def setUp(self):
-        """ Initialize variables to be used by the tests as lists of strings"""
-        self.FASTQ_EXAMPLE = FASTQ_EXAMPLE.split('\n')
-        self.FASTQ_EXAMPLE_2 = FASTQ_EXAMPLE_2.split('\n')
-        self.FASTQ_EXAMPLE_3 = FASTQ_EXAMPLE_3.split('\n')
-
-
-class FileData(object):
-    def setUp(self):
-        """ Initialize variables to be used by the tests as file names"""
-        tmp_files = []
-        for attr, val in [('FASTQ_EXAMPLE', FASTQ_EXAMPLE),
-                          ('FASTQ_EXAMPLE_2', FASTQ_EXAMPLE_2),
-                          ('FASTQ_EXAMPLE_3', FASTQ_EXAMPLE_3)]:
-            tmp_file = tempfile.NamedTemporaryFile('w')
-            tmp_file.write(val)
-            tmp_file.flush()
-            tmp_file.seek(0)
-            setattr(self, attr, tmp_file.name)
-            tmp_files.append(tmp_file)
-        self._tmp_files = tmp_files
-
-    def tearDown(self):
-        for tmp_file in self._tmp_files:
-            tmp_file.close()
-
-
-class ParseFastqTests(object):
-    def test_parse(self):
-        for label, seq, qual in parse_fastq(self.FASTQ_EXAMPLE,
-                                            phred_offset=64):
-            self.assertTrue(label in DATA)
-            self.assertEqual(seq, DATA[label]["seq"])
-            self.assertTrue((qual == DATA[label]["qual"]).all())
-
-        # Make sure that enforce_qual_range set to False allows qual scores
-        # to fall outside the typically acceptable range of 0-62
-        for label, seq, qual in parse_fastq(self.FASTQ_EXAMPLE_2,
-                                            phred_offset=33,
-                                            enforce_qual_range=False):
-            self.assertTrue(label in DATA_2)
-            self.assertEqual(seq, DATA_2[label]["seq"])
-            self.assertTrue((qual == DATA_2[label]["qual"]).all())
-
-        # This should raise a FastqParseError since the qual scores are
-        # intended to be interpreted with an offset of 64, and using 33 will
-        # make the qual score fall outside the acceptable range of 0-62.
-        with self.assertRaises(FastqParseError):
-            list(parse_fastq(self.FASTQ_EXAMPLE, phred_offset=33))
-
-    def test_parse_error(self):
-        with self.assertRaises(FastqParseError):
-            list(parse_fastq(self.FASTQ_EXAMPLE_2, strict=True))
-
-        with self.assertRaises(FastqParseError):
-            list(parse_fastq(self.FASTQ_EXAMPLE_3, phred_offset=64))
-
-    def test_invalid_phred_offset(self):
-        with self.assertRaises(ValueError):
-            list(parse_fastq(self.FASTQ_EXAMPLE, phred_offset=42))
-
-
-class ParseFastqTestsInputIsIterable(IterableData, ParseFastqTests, TestCase):
-    pass
-
-
-class ParseFastqTestsInputIsFileNames(FileData, ParseFastqTests, TestCase):
-    pass
-
-
-DATA = {
-    "GAPC_0015:6:1:1259:10413#0/1":
-    dict(seq='AACACCAAACTTCTCCACCACGTGAGCTACAAAAG',
-         qual=array([32, 32, 32, 32, 25, 30, 20, 29, 32, 29, 35, 30, 35, 33,
-                     34, 35, 33, 35, 35, 32, 30, 12, 34, 30, 35, 35, 25, 20,
-                     28, 20, 28, 25, 28, 23, 6])),
-    "GAPC_0015:6:1:1283:11957#0/1":
-    dict(seq='TATGTATATATAACATATACATATATACATACATA',
-         qual=array([29, 11, 26, 27, 16, 25, 29, 31, 27, 25, 25, 30, 32, 32,
-                     32, 33, 35, 30, 28, 28, 32, 34, 20, 32, 32, 35, 32, 28,
-                     33, 20, 32, 32, 34, 34, 34])),
-    "GAPC_0015:6:1:1284:10484#0/1":
-    dict(seq='TCAGTTTTCCTCGCCATATTTCACGTCCTAAAGCG',
-         qual=array([21, 13, 31, 29, 29, 21, 31, 29, 26, 31, 25, 30, 28, 30,
-                     30, 32, 32, 25, 29, 32, 30, 19, 26, 29, 28, 25, 34, 34,
-                     32, 30, 31, 12, 34, 12, 31])),
-    "GAPC_0015:6:1:1287:17135#0/1":
-    dict(seq='TGTGCCTATGGAAGCAGTTCTAGGATCCCCTAGAA',
-         qual=array([30, 33, 33, 35, 35, 35, 12, 28, 35, 35, 35, 28, 35, 28,
-                     35, 20, 11, 20, 19, 29, 11, 26, 28, 29, 29,  9, 28, 27,
-                     23, 33, 30, 20, 32, 30, 11])),
-    "GAPC_0015:6:1:1293:3171#0/1":
-    dict(seq="AAAGAAAGGAAGAAAAGAAAAAGAAACCCGAGTTA",
-             qual=array([34, 32, 34, 34, 34, 21, 31, 27, 25, 25, 35, 33, 36,
-                         35, 36, 33, 31, 12, 34, 33, 33, 33, 34, 23, 34, 33,
-                         33, 35, 25, 35, 35, 32, 33, 30, 35])),
-    "GAPC_0015:6:1:1297:10729#0/1":
-    dict(seq="TAATGCCAAAGAAATATTTCCAAACTACATGCTTA",
-             qual=array([20, 28, 35, 35, 12, 34, 34, 32, 32, 34, 33, 35, 35,
-                         29, 31, 35, 33, 35, 35, 35, 35, 35, 12, 35, 35, 35,
-                         28, 35, 35, 20, 35, 35, 25, 12, 30])),
-    "GAPC_0015:6:1:1299:5940#0/1":
-    dict(seq="AATCAAGAAATGAAGATTTATGTATGTGAAGAATA",
-             qual=array([36, 35, 36, 36, 34, 35, 38, 38, 38, 36, 38, 38, 38,
-                         36, 32, 36, 36, 32, 30, 32, 35, 32, 15, 35, 32, 25,
-                         34, 34, 32, 30, 37, 37, 35, 36, 37])),
-    "GAPC_0015:6:1:1308:6996#0/1":
-    dict(seq="TGGGACACATGTCCATGCTGTGGTTTTAACCGGCA",
-             qual=array([33, 29, 32, 33, 12, 25, 32, 25, 30, 30, 35, 35, 25,
-                         33, 32, 30, 30, 20, 35, 35, 11, 31, 24, 29, 28, 35,
-                         28, 35, 32, 35, 33, 20, 20, 20, 35])),
-    "GAPC_0015:6:1:1314:13295#0/1":
-    dict(seq="AATATTGCTTTGTCTGAACGATAGTGCTCTTTGAT",
-         qual=array([35, 12, 35, 35, 28, 28, 36, 36, 36, 36, 36, 33, 33, 25,
-                     36, 32, 20, 32, 32, 32, 34, 12, 25, 20, 28, 32, 33, 32,
-                     32, 32, 34, 26, 35, 35, 35])),
-    "GAPC_0015:6:1:1317:3403#0/1":
-    dict(seq="TTGTTTCCACTTGGTTGATTTCACCCCTGAGTTTG",
-         # had to add space in qual line
-         qual=array([28, 28, 28, 26, 20, 25, 20, 19, 33, 12, 34, 34, 32, 32,
-                     28, 31, 21, 26, 31, 34, 34, 35, 35, 32, 35, 35, 30, 27,
-                     33, 35, 28, 33, 28, 20, 35]))
-
-}
-
-
-DATA_2 = {
-    "GAPC_0017:6:1:1259:10413#0/1":
-    dict(seq='AACACCAAACTTCTCCACCACGTGAGCTACAAAAG',
-         qual=array([63, 63, 63, 63, 56, 61, 51, 60, 63, 60, 66, 61, 66, 64,
-                     65, 66, 64, 66, 66, 63, 61, 43, 65, 61, 66, 66, 56, 51,
-                     59, 51, 59, 56, 59, 54, 37])),
-    "GAPC_0015:6:1:1283:11957#0/1":
-    dict(seq='TATGTATATATAACATATACATATATACATACATA',
-         qual=array([60, 42, 57, 58, 47, 56, 60, 62, 58, 56, 56, 61, 63, 63,
-                     63, 64, 66, 61, 59, 59, 63, 65, 51, 63, 63, 66, 63, 59,
-                     64, 51, 63, 63, 65, 65, 65]))
-}
-
-
-FASTQ_EXAMPLE = r"""@GAPC_0015:6:1:1259:10413#0/1
-AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
-+GAPC_0015:6:1:1259:10413#0/1
-````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF
- at GAPC_0015:6:1:1283:11957#0/1
-TATGTATATATAACATATACATATATACATACATA
-+GAPC_0015:6:1:1283:11957#0/1
-]KZ[PY]_[YY^```ac^\\`bT``c`\aT``bbb
- at GAPC_0015:6:1:1284:10484#0/1
-TCAGTTTTCCTCGCCATATTTCACGTCCTAAAGCG
-+GAPC_0015:6:1:1284:10484#0/1
-UM_]]U_]Z_Y^\^^``Y]`^SZ]\Ybb`^_LbL_
- at GAPC_0015:6:1:1287:17135#0/1
-TGTGCCTATGGAAGCAGTTCTAGGATCCCCTAGAA
-+GAPC_0015:6:1:1287:17135#0/1
-^aacccL\ccc\c\cTKTS]KZ\]]I\[Wa^T`^K
- at GAPC_0015:6:1:1293:3171#0/1
-AAAGAAAGGAAGAAAAGAAAAAGAAACCCGAGTTA
-+GAPC_0015:6:1:1293:3171#0/1
-b`bbbU_[YYcadcda_LbaaabWbaacYcc`a^c
- at GAPC_0015:6:1:1297:10729#0/1
-TAATGCCAAAGAAATATTTCCAAACTACATGCTTA
-+GAPC_0015:6:1:1297:10729#0/1
-T\ccLbb``bacc]_cacccccLccc\ccTccYL^
- at GAPC_0015:6:1:1299:5940#0/1
-AATCAAGAAATGAAGATTTATGTATGTGAAGAATA
-+GAPC_0015:6:1:1299:5940#0/1
-dcddbcfffdfffd`dd`^`c`Oc`Ybb`^eecde
- at GAPC_0015:6:1:1308:6996#0/1
-TGGGACACATGTCCATGCTGTGGTTTTAACCGGCA
-+GAPC_0015:6:1:1308:6996#0/1
-a]`aLY`Y^^ccYa`^^TccK_X]\c\c`caTTTc
- at GAPC_0015:6:1:1314:13295#0/1
-AATATTGCTTTGTCTGAACGATAGTGCTCTTTGAT
-+GAPC_0015:6:1:1314:13295#0/1
-cLcc\\dddddaaYd`T```bLYT\`a```bZccc
- at GAPC_0015:6:1:1317:3403#0/1
-TTGTTTCCACTTGGTTGATTTCACCCCTGAGTTTG
-+GAPC_0015:6:1:1317:3403#0/1
-\\\ZTYTSaLbb``\_UZ_bbcc`cc^[ac\a\Tc"""
-
-
-FASTQ_EXAMPLE_2 = r"""@GAPC_0017:6:1:1259:10413#0/1
-AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
-+GAPC_0015:6:1:1259:10413#0/1
-````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF
- at GAPC_0015:6:1:1283:11957#0/1
-TATGTATATATAACATATACATATATACATACATA
-+GAPC_0015:6:1:1283:11957#0/1
-]KZ[PY]_[YY^```ac^\\`bT``c`\aT``bbb
-"""
-
-
-FASTQ_EXAMPLE_3 = r"""@GAPC_0017:6:1:1259:10413#0/1
-AACACCAAACTTCTCCACCACGTGAGCTACAAAAG
-+GAPC_0015:6:1:1259:10413#0/1
-````Y^T]`]c^cabcacc`^Lb^ccYT\T\Y\WF
- at GAPC_0015:6:1:1283:11957#0/1
-"""
-
-
-if __name__ == "__main__":
-    main()
diff --git a/skbio/parse/sequences/tests/test_iterator.py b/skbio/parse/sequences/tests/test_iterator.py
deleted file mode 100644
index fc84d6c..0000000
--- a/skbio/parse/sequences/tests/test_iterator.py
+++ /dev/null
@@ -1,336 +0,0 @@
-#!/usr/bin/env python
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from unittest import TestCase, main
-
-from six import StringIO
-from numpy import arange, array
-
-from skbio import SequenceIterator, FastaIterator, FastqIterator
-
-
-class SeqIterTests(TestCase):
-    def setUp(self):
-        self.seq_ok = {'SequenceID': 'foo',
-                       'Sequence': 'AATTGGCC',
-                       'QualID': None,
-                       'Qual': None}
-
-        self.seqqual_ok = {'SequenceID': 'foo',
-                           'Sequence': 'AATTGGCC',
-                           'QualID': 'foo',
-                           'Qual': arange(8)}
-
-        self.seq_bad = {'SequenceID': 'foo',
-                        'Sequence': 'AATT  GGCC',
-                        'QualID': None,
-                        'Qual': None}
-
-        self.seqqual_bad_id = {'SequenceID': 'foo',
-                               'Sequence': 'AATTGGCC',
-                               'QualID': 'bar',
-                               'Qual': arange(8)}
-
-        self.seqqual_bad_qual = {'SequenceID': 'foo',
-                                 'Sequence': 'AATTGGCC',
-                                 'QualID': 'foo',
-                                 'Qual': arange(5)}
-
-        def rev_f(st):
-            st['Sequence'] = st['Sequence'][::-1]
-            st['Qual'] = st['Qual'][::-1] if st['Qual'] is not None else None
-
-        self.rev_f = rev_f
-
-    def test_validate_ids_true(self):
-        wk = SequenceIterator(['aattgg'], valid_id=True)
-
-        wk.state = self.seq_ok.copy()
-        wk.validate_ids()
-        self.assertFalse(wk.failed)
-
-        wk.state = self.seqqual_ok.copy()
-        wk.validate_ids()
-        self.assertFalse(wk.failed)
-
-        wk.state = self.seqqual_bad_id.copy()
-        wk.validate_ids()
-        self.assertTrue(wk.failed)
-
-    def test_validate_ids_false(self):
-        wk = SequenceIterator(['aattgg'], valid_id=False)
-
-        wk.state = self.seq_ok.copy()
-        wk.validate_ids()
-        self.assertFalse(wk.failed)
-
-        wk.state = self.seqqual_ok.copy()
-        wk.validate_ids()
-        self.assertFalse(wk.failed)
-
-        wk.state = self.seqqual_bad_id.copy()
-        wk.validate_ids()
-        self.assertFalse(wk.failed)
-
-    def test_validate_lengths_true(self):
-        wk = SequenceIterator(['aattgg'], valid_length=True)
-
-        wk.state = self.seq_ok.copy()
-        wk.valid_lengths()
-        self.assertFalse(wk.failed)
-
-        wk.state = self.seqqual_ok.copy()
-        wk.valid_lengths()
-        self.assertFalse(wk.failed)
-
-        wk.state = self.seqqual_bad_qual.copy()
-        wk.valid_lengths()
-        self.assertTrue(wk.failed)
-
-    def test_validate_lengths_false(self):
-        wk = SequenceIterator(['aattgg'], valid_length=False)
-
-        wk.state = self.seq_ok.copy()
-        wk.valid_lengths()
-        self.assertFalse(wk.failed)
-
-        wk.state = self.seqqual_ok.copy()
-        wk.valid_lengths()
-        self.assertFalse(wk.failed)
-
-        wk.state = self.seqqual_bad_qual.copy()
-        wk.valid_lengths()
-        self.assertFalse(wk.failed)
-
-    def test_transform(self):
-        wk = SequenceIterator(['aattgg'], transform=self.rev_f)
-
-        wk.state = self.seqqual_ok.copy()
-        self.assertEqual(wk.state['Sequence'], self.seqqual_ok['Sequence'])
-        wk.transform()
-        self.assertEqual(wk.state['Sequence'],
-                         self.seqqual_ok['Sequence'][::-1])
-        self.assertTrue((wk.state['Qual'] ==
-                         self.seqqual_ok['Qual'][::-1]).all())
-
-    def test_passing_none_for_seqs(self):
-        with self.assertRaises(ValueError):
-            SequenceIterator(None)
-
-
-class FastaTests(TestCase):
-    def setUp(self):
-        self.fastas = [StringIO(fasta1), StringIO(fasta2), StringIO(fasta3)]
-        self.quals = [StringIO(qual1), StringIO(qual2), StringIO(qual3)]
-
-        self.bad_qual_val = [StringIO(qual1), StringIO(qual_bad_val),
-                             StringIO(qual3)]
-        self.bad_qual_id = [StringIO(qual1), StringIO(qual_bad_id),
-                            StringIO(qual3)]
-
-    def test_fasta_gen(self):
-        wk = FastaIterator(seq=self.fastas)
-        gen = wk()
-
-        exp1 = {'SequenceID': '1', 'Sequence': 'aattggcc', 'Qual': None,
-                'QualID': None}
-        exp2 = {'SequenceID': '2', 'Sequence': 'aattaatt', 'Qual': None,
-                'QualID': None}
-        exp3 = {'SequenceID': '3', 'Sequence': 'atat', 'Qual': None,
-                'QualID': None}
-        exp4 = {'SequenceID': '4', 'Sequence': 'attatt', 'Qual': None,
-                'QualID': None}
-        exp5 = {'SequenceID': '5', 'Sequence': 'ggccc', 'Qual': None,
-                'QualID': None}
-
-        obs1 = next(gen)
-        self.assertEqual(obs1, exp1)
-        self.assertFalse(wk.failed)
-
-        obs2 = next(gen)
-        self.assertEqual(obs2, exp2)
-        self.assertFalse(wk.failed)
-
-        obs3 = next(gen)
-        self.assertEqual(obs3, exp3)
-        self.assertFalse(wk.failed)
-
-        obs4 = next(gen)
-        self.assertEqual(obs4, exp4)
-        self.assertFalse(wk.failed)
-
-        obs5 = next(gen)
-        self.assertEqual(obs5, exp5)
-        self.assertFalse(wk.failed)
-
-    def test_fasta_qual(self):
-        wk = FastaIterator(seq=self.fastas, qual=self.quals)
-        gen = wk()
-
-        exp1 = {'SequenceID': '1', 'Sequence': 'aattggcc',
-                'Qual': arange(1, 9), 'QualID': '1'}
-        exp2 = {'SequenceID': '2', 'Sequence': 'aattaatt', 'QualID': '2',
-                'Qual': arange(1, 9)[::-1]}
-        exp3 = {'SequenceID': '3', 'Sequence': 'atat', 'Qual': arange(1, 5),
-                'QualID': '3'}
-        exp4 = {'SequenceID': '4', 'Sequence': 'attatt', 'Qual': arange(1, 7),
-                'QualID': '4'}
-        exp5 = {'SequenceID': '5', 'Sequence': 'ggccc', 'Qual': arange(1, 6),
-                'QualID': '5'}
-
-        obs1 = next(gen)
-        self.assertTrue((obs1['Qual'] == exp1['Qual']).all())
-        obs1.pop('Qual')
-        exp1.pop('Qual')
-        self.assertEqual(obs1, exp1)
-        self.assertFalse(wk.failed)
-
-        obs2 = next(gen)
-        self.assertTrue((obs2['Qual'] == exp2['Qual']).all())
-        obs2.pop('Qual')
-        exp2.pop('Qual')
-        self.assertEqual(obs2, exp2)
-        self.assertFalse(wk.failed)
-
-        obs3 = next(gen)
-        self.assertTrue((obs3['Qual'] == exp3['Qual']).all())
-        obs3.pop('Qual')
-        exp3.pop('Qual')
-        self.assertEqual(obs3, exp3)
-        self.assertFalse(wk.failed)
-
-        obs4 = next(gen)
-        self.assertTrue((obs4['Qual'] == exp4['Qual']).all())
-        obs4.pop('Qual')
-        exp4.pop('Qual')
-        self.assertEqual(obs4, exp4)
-        self.assertFalse(wk.failed)
-
-        obs5 = next(gen)
-        self.assertTrue((obs5['Qual'] == exp5['Qual']).all())
-        obs5.pop('Qual')
-        exp5.pop('Qual')
-        self.assertEqual(obs5, exp5)
-        self.assertFalse(wk.failed)
-
-    def test_fasta_badqual_val(self):
-        wk = FastaIterator(seq=self.fastas, qual=self.bad_qual_val)
-        gen = wk()
-
-        # default behavior is to sliently ignore
-        exp_ids = ['1', '2', '4', '5']
-        obs_ids = [r['SequenceID'] for r in gen]
-
-        self.assertEqual(obs_ids, exp_ids)
-
-    def test_fasta_badqual_id(self):
-        wk = FastaIterator(seq=self.fastas, qual=self.bad_qual_id)
-        gen = wk()
-
-        # default behavior is to sliently ignore
-        exp_ids = ['1', '2', '4', '5']
-        obs_ids = [r['SequenceID'] for r in gen]
-
-        self.assertEqual(obs_ids, exp_ids)
-
-
-class FastqTests(TestCase):
-    def setUp(self):
-        self.fastqs = [StringIO(fastq1), StringIO(fastq2)]
-
-    def test_fastq_gen(self):
-        wk = FastqIterator(seq=self.fastqs)
-        gen = wk()
-
-        exp1 = {'SequenceID': '1', 'Sequence': 'atat', 'QualID': '1',
-                'Qual': array([32, 33, 34, 35])}
-        exp2 = {'SequenceID': '2', 'Sequence': 'atgc', 'QualID': '2',
-                'Qual': array([33, 34, 35, 36])}
-        exp3 = {'SequenceID': '3', 'Sequence': 'taa', 'QualID': '3',
-                'Qual': array([36, 37, 38])}
-
-        obs1 = next(gen)
-        self.assertTrue((obs1['Qual'] == exp1['Qual']).all())
-        obs1.pop('Qual')
-        exp1.pop('Qual')
-        self.assertEqual(obs1, exp1)
-
-        obs2 = next(gen)
-        self.assertTrue((obs2['Qual'] == exp2['Qual']).all())
-        obs2.pop('Qual')
-        exp2.pop('Qual')
-        self.assertEqual(obs2, exp2)
-
-        obs3 = next(gen)
-        self.assertTrue((obs3['Qual'] == exp3['Qual']).all())
-        obs3.pop('Qual')
-        exp3.pop('Qual')
-        self.assertEqual(obs3, exp3)
-
-
-fasta1 = """>1
-aattggcc
->2
-aattaatt
-"""
-
-fasta2 = """>3
-atat
-"""
-
-fasta3 = """>4
-attatt
->5
-ggccc
-"""
-
-qual1 = """>1
-1 2 3 4 5 6 7 8
->2
-8 7 6 5 4 3 2 1
-"""
-
-qual2 = """>3
-1 2 3 4
-"""
-
-qual3 = """>4
-1 2 3 4 5 6
->5
-1 2 3 4 5
-"""
-
-qual_bad_val = """>3
-1 2
-"""
-
-qual_bad_id = """>asdasd
-1 2 3 4
-"""
-
-fastq1 = """@1
-atat
-+
-ABCD
- at 2
-atgc
-+
-BCDE
-"""
-
-fastq2 = """@3
-taa
-+
-EFG
-"""
-
-
-if __name__ == '__main__':
-    main()
diff --git a/skbio/parse/tests/__init__.py b/skbio/parse/tests/__init__.py
deleted file mode 100644
index c99682c..0000000
--- a/skbio/parse/tests/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
diff --git a/skbio/parse/tests/test_record.py b/skbio/parse/tests/test_record.py
deleted file mode 100644
index 92a4188..0000000
--- a/skbio/parse/tests/test_record.py
+++ /dev/null
@@ -1,550 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from unittest import TestCase, main
-
-from skbio.parse.record import (DelimitedSplitter,
-                                GenericRecord, MappedRecord, TypeSetter,
-                                list_adder, dict_adder,
-                                LineOrientedConstructor, int_setter,
-                                bool_setter, string_and_strip, FieldWrapper,
-                                StrictFieldWrapper, raise_unknown_field,
-                                FieldMorpher)
-from skbio.io import FieldError
-
-
-class recordsTests(TestCase):
-
-    """Tests of top-level functionality in records."""
-
-    def test_string_and_strip(self):
-        """string_and_strip should convert all items to strings and strip them
-        """
-        self.assertEqual(string_and_strip(), [])
-        self.assertEqual(string_and_strip('\t', ' ', '\n\t'), ['', '', ''])
-        self.assertEqual(string_and_strip('\ta\tb', 3, '   cde   e', None),
-                         ['a\tb', '3', 'cde   e', 'None'])
-
-    def test_raise_unknown_field(self):
-        """raise_unknown_field should always raise FieldError"""
-        self.assertRaises(FieldError, raise_unknown_field, 'xyz', 123)
-
-
-class DelimitedSplitterTests(TestCase):
-
-    """Tests of the DelimitedSplitter factory function."""
-
-    def test_parsers(self):
-        """DelimitedSplitter should return function with correct behavior"""
-        empty = DelimitedSplitter()
-        space = DelimitedSplitter(None)
-        semicolon = DelimitedSplitter(';')
-        twosplits = DelimitedSplitter(';', 2)
-        allsplits = DelimitedSplitter(';', None)
-        lastone = DelimitedSplitter(';', -1)
-        lasttwo = DelimitedSplitter(';', -2)
-
-        self.assertEqual(empty('a   b  c'), ['a', 'b  c'])
-        self.assertEqual(empty('abc'), ['abc'])
-        self.assertEqual(empty('   '), [])
-
-        self.assertEqual(empty('a  b  c'), space('a  b  c'))
-        self.assertEqual(semicolon('  a  ; b   ;  c  d'), ['a', 'b   ;  c  d'])
-        self.assertEqual(twosplits('  a  ; b   ;  c  d'), ['a', 'b', 'c  d'])
-        self.assertEqual(allsplits(' a ;  b  ; c;;d;e  ;'),
-                         ['a', 'b', 'c', '', 'd', 'e', ''])
-        self.assertEqual(lastone(' a ;  b  ; c;;d;e  ;'),
-                         ['a ;  b  ; c;;d;e', ''])
-        self.assertEqual(lasttwo(' a ;  b  ; c;;d;e  ;'),
-                         ['a ;  b  ; c;;d', 'e', ''])
-        self.assertEqual(lasttwo(''), [])
-        self.assertEqual(lasttwo('x'), ['x'])
-        self.assertEqual(lasttwo('x;'), ['x', ''])
-
-
-class GenericRecordTests(TestCase):
-
-    """Tests of the GenericRecord class"""
-    class gr(GenericRecord):
-        Required = {'a': 'x', 'b': [], 'c': {}}
-
-    def test_init(self):
-        """GenericRecord init should work OK empty or with data"""
-        self.assertEqual(GenericRecord(), {})
-        self.assertEqual(GenericRecord({'a': 1}), {'a': 1})
-        assert isinstance(GenericRecord(), GenericRecord)
-
-    def test_init_subclass(self):
-        """GenericRecord subclass init should include required data"""
-        self.assertEqual(self.gr(), {'a': 'x', 'b': [], 'c': {}})
-        self.assertEqual(self.gr({'a': []}), {'a': [], 'b': [], 'c': {}})
-        assert isinstance(self.gr(), self.gr)
-        assert isinstance(self.gr(), GenericRecord)
-
-    def test_delitem(self):
-        """GenericRecord delitem should fail if item required"""
-        g = self.gr()
-        g['d'] = 3
-        self.assertEqual(g, {'a': 'x', 'b': [], 'c': {}, 'd': 3})
-        del g['d']
-        self.assertEqual(g, {'a': 'x', 'b': [], 'c': {}})
-        self.assertRaises(AttributeError, g.__delitem__, 'a')
-        g['c'][3] = 4
-        self.assertEqual(g['c'], {3: 4})
-
-    def test_copy(self):
-        """GenericRecord copy should include attributes and set correct class
-        """
-        g = self.gr()
-        g['a'] = 'abc'
-        g.X = 'y'
-        h = g.copy()
-        self.assertEqual(g, h)
-        assert isinstance(h, self.gr)
-        self.assertEqual(h.X, 'y')
-        self.assertEqual(h, {'a': 'abc', 'b': [], 'c': {}})
-
-
-class MappedRecordTests(TestCase):
-
-    """Tests of the MappedRecord class"""
-
-    def setUp(self):
-        """Define a few standard MappedRecords"""
-        self.empty = MappedRecord()
-        self.single = MappedRecord({'a': 3})
-        self.several = MappedRecord(a=4, b=5, c='a', d=[1, 2, 3])
-
-    def test_init_empty(self):
-        """MappedRecord empty init should work OK"""
-        g = MappedRecord()
-        self.assertEqual(g, {})
-
-    def test_init_data(self):
-        """MappedRecord should work like normal dict init"""
-        exp = {'a': 3, 'b': 4}
-        self.assertEqual(MappedRecord({'a': 3, 'b': 4}), exp)
-        self.assertEqual(MappedRecord(a=3, b=4), exp)
-        self.assertEqual(MappedRecord([['a', 3], ['b', 4]]), exp)
-
-    def test_init_subclass(self):
-        """MappedRecord subclasses should behave as expected"""
-        class rec(MappedRecord):
-            Required = {'a': {}, 'b': 'xyz', 'c': 3}
-            Aliases = {'B': 'b'}
-
-        r = rec()
-        self.assertEqual(r, {'a': {}, 'b': 'xyz', 'c': 3})
-        # test that subclassing is correct
-        s = r.copy()
-        assert isinstance(s, rec)
-        # test Aliases
-        s.B = 0
-        self.assertEqual(s, {'a': {}, 'b': 0, 'c': 3})
-        # test Required
-        try:
-            del s.B
-        except AttributeError:
-            pass
-        else:
-            raise AssertionError("Subclass failed to catch requirement")
-
-    def test_getattr(self):
-        """MappedRecord getattr should look in dict after real attrs"""
-        s = self.several
-        self.assertEqual(s.Aliases, {})
-        self.assertEqual(s.a, 4)
-        self.assertEqual(s.d, [1, 2, 3])
-        for key in s:
-            self.assertEqual(getattr(s, key), s[key])
-        assert 'xyz' not in s
-        self.assertEqual(s.xyz, None)
-        self.assertEqual(s['xyz'], None)
-        s.Aliases = {'xyz': 'a'}
-        self.assertEqual(s['xyz'], 4)
-
-    def test_setattr(self):
-        """MappedRecord setattr should add to dict"""
-        s = self.single
-        # check that we haven't screwed up normal attribute setting
-        assert 'Aliases' not in s
-        s.Aliases = {'x': 'y'}
-        assert 'Aliases' not in s
-        self.assertEqual(s.Aliases, {'x': 'y'})
-        s.x = 5
-        assert 'x' in s
-        self.assertEqual(s['x'], 5)
-        self.assertEqual(s.x, 5)
-        s.Aliases = {'XYZ': 'b'}
-        s.XYZ = 3
-        self.assertEqual(s.b, 3)
-
-    def test_delattr(self):
-        """MappedRecord delattr should work for 'normal' and other attributes
-        """
-        s = self.single
-        s.__dict__['x'] = 'y'
-        assert 'x' not in s
-        self.assertEqual(s.x, 'y')
-        del s.x
-        self.assertEqual(s.x, None)
-        self.assertEqual(s, {'a': 3})
-        # try it for an internal attribute: check it doesn't delete anything
-        # else
-        s.b = 4
-        self.assertEqual(s, {'a': 3, 'b': 4})
-        del s.a
-        self.assertEqual(s, {'b': 4})
-        del s.abc
-        self.assertEqual(s, {'b': 4})
-        s.Required = {'b': True}
-        try:
-            del s.b
-        except AttributeError:
-            pass
-        else:
-            raise AssertionError("Allowed deletion of required attribute""")
-        s.a = 3
-        self.assertEqual(s.a, 3)
-        s.Aliases = {'xyz': 'a'}
-        del s.xyz
-        self.assertEqual(s.a, None)
-
-    def test_getitem(self):
-        """MappedRecord getitem should work only for keys, not attributes"""
-        s = self.single
-        self.assertEqual(s['Required'], None)
-        self.assertEqual(s['a'], 3)
-        self.assertEqual(s['xyz'], None)
-        self.assertEqual(s[list('abc')], None)
-        s.Aliases = {'xyz': 'a'}
-        self.assertEqual(s['xyz'], 3)
-
-    def test_setitem(self):
-        """MappedRecord setitem should work only for keys, not attributes"""
-        s = self.single
-        s['Required'] = None
-        self.assertEqual(s, {'a': 3, 'Required': None})
-        self.assertEqual(s.Required, {})
-        self.assertNotEqual(s.Required, None)
-        s['c'] = 5
-        self.assertEqual(s, {'a': 3, 'c': 5, 'Required': None})
-        # still not allowed unhashable objects as keys
-        self.assertRaises(TypeError, s.__setitem__, range(3))
-        s.Aliases = {'C': 'c'}
-        s['C'] = 3
-        self.assertEqual(s, {'a': 3, 'c': 3, 'Required': None})
-
-    def test_delitem(self):
-        """MappedRecord delitem should only work for keys, not attributes"""
-        s = self.single
-        del s['Required']
-        self.assertEqual(s.Required, {})
-        s.Required = {'a': True}
-        try:
-            del s['a']
-        except AttributeError:
-            pass
-        else:
-            raise AssertionError("Allowed deletion of required item")
-        s.Aliases = {'B': 'b'}
-        s.b = 5
-        self.assertEqual(s.b, 5)
-        del s.B
-        self.assertEqual(s.b, None)
-
-    def test_contains(self):
-        """MappedRecord contains should use aliases, but not apply to attrs"""
-        s = self.single
-        assert 'a' in s
-        assert 'b' not in s
-        s.b = 5
-        assert 'b' in s
-        assert 'Required' not in s
-        assert 'A' not in s
-        s.Aliases = {'A': 'a'}
-        assert 'A' in s
-
-    def test_get(self):
-        """MappedRecord get should be typesafe against unhashables"""
-        s = self.single
-        self.assertEqual(s.get(1, 6), 6)
-        self.assertEqual(s.get('a', 'xyz'), 3)
-        self.assertEqual(s.get('ABC', 'xyz'), 'xyz')
-        s.Aliases = {'ABC': 'a'}
-        self.assertEqual(s.get('ABC', 'xyz'), 3)
-        self.assertEqual(s.get([1, 2, 3], 'x'), 'x')
-
-    def test_setdefault(self):
-        """MappedRecord setdefault should not be typesafe against unhashables
-        """
-        s = self.single
-        x = s.setdefault('X', 'xyz')
-        self.assertEqual(x, 'xyz')
-        self.assertEqual(s, {'a': 3, 'X': 'xyz'})
-        self.assertRaises(TypeError, s.setdefault, ['a', 'b'], 'xyz')
-
-    def test_update(self):
-        """MappedRecord update should transparently convert keys"""
-        s = self.single
-        s.b = 999
-        s.Aliases = {'XYZ': 'x', 'ABC': 'a'}
-        d = {'ABC': 111, 'CVB': 222}
-        s.update(d)
-        self.assertEqual(s, {'a': 111, 'b': 999, 'CVB': 222})
-
-    def test_copy(self):
-        """MappedRecord copy should return correct class"""
-        s = self.single
-        t = s.copy()
-        assert isinstance(t, MappedRecord)
-        s.Aliases = {'XYZ': 'x'}
-        u = s.copy()
-        u.Aliases['ABC'] = 'a'
-        self.assertEqual(s.Aliases, {'XYZ': 'x'})
-        self.assertEqual(t.Aliases, {})
-        self.assertEqual(u.Aliases, {'XYZ': 'x', 'ABC': 'a'})
-
-    def test_subclass(self):
-        """MappedRecord subclassing should work correctly"""
-        class ret3(MappedRecord):
-            DefaultValue = 3
-            ClassData = 'xyz'
-
-        x = ret3({'ABC': 777, 'DEF': '999'})
-        self.assertEqual(x.ZZZ, 3)
-        self.assertEqual(x.ABC, 777)
-        self.assertEqual(x.DEF, '999')
-        self.assertEqual(x.ClassData, 'xyz')
-        x.ZZZ = 6
-        self.assertEqual(x.ZZZ, 6)
-        self.assertEqual(x.ZZ, 3)
-        x.ClassData = 'qwe'
-        self.assertEqual(x.ClassData, 'qwe')
-        self.assertEqual(ret3.ClassData, 'xyz')
-
-    def test_DefaultValue(self):
-        """MappedRecord DefaultValue should give new copy when requested"""
-        class m(MappedRecord):
-            DefaultValue = []
-
-        a = m()
-        b = m()
-        assert a['abc'] is not b['abc']
-        assert a['abc'] == b['abc']
-
-
-class dummy(object):
-
-    """Do-nothing class whose attributes can be freely abused."""
-    pass
-
-
-class TypeSetterTests(TestCase):
-
-    """Tests of the TypeSetter class"""
-
-    def test_setter_empty(self):
-        """TypeSetter should set attrs to vals on empty init"""
-        d = dummy()
-        ident = TypeSetter()
-        ident(d, 'x', 'abc')
-        self.assertEqual(d.x, 'abc')
-        ident(d, 'y', 3)
-        self.assertEqual(d.y, 3)
-        ident(d, 'x', 2)
-        self.assertEqual(d.x, 2)
-
-    def test_setter_typed(self):
-        """TypeSetter should set attrs to constructor(val) when specified"""
-        d = dummy()
-        i = TypeSetter(int)
-        i(d, 'zz', 3)
-        self.assertEqual(d.zz, 3)
-        i(d, 'xx', '456')
-        self.assertEqual(d.xx, 456)
-
-
-class TypeSetterLikeTests(TestCase):
-
-    """Tests of the functions that behave similarly to TypeSetter products"""
-
-    def test_list_adder(self):
-        """list_adder should add items to list, creating if necessary"""
-        d = dummy()
-        list_adder(d, 'x', 3)
-        self.assertEqual(d.x, [3])
-        list_adder(d, 'x', 'abc')
-        self.assertEqual(d.x, [3, 'abc'])
-        list_adder(d, 'y', [2, 3])
-        self.assertEqual(d.x, [3, 'abc'])
-        self.assertEqual(d.y, [[2, 3]])
-
-    def test_dict_adder(self):
-        """dict_adder should add items to dict, creating if necessary"""
-        d = dummy()
-        dict_adder(d, 'x', 3)
-        self.assertEqual(d.x, {3: None})
-        dict_adder(d, 'x', 'ab')
-        self.assertEqual(d.x, {3: None, 'a': 'b'})
-        dict_adder(d, 'x', ['a', 0])
-        self.assertEqual(d.x, {3: None, 'a': 0})
-        dict_adder(d, 'y', None)
-        self.assertEqual(d.x, {3: None, 'a': 0})
-        self.assertEqual(d.y, {None: None})
-
-
-class LineOrientedConstructorTests(TestCase):
-
-    """Tests of the LineOrientedConstructor class"""
-
-    def test_init_empty(self):
-        """LOC empty init should succeed with expected defaults"""
-        l = LineOrientedConstructor()
-        self.assertEqual(l.Lines, [])
-        self.assertEqual(l.LabelSplitter(' ab  cd  '), ['ab', 'cd'])
-        self.assertEqual(l.FieldMap, {})
-        self.assertEqual(l.Constructor, MappedRecord)
-        self.assertEqual(l.Strict, False)
-
-    def test_empty_LOC(self):
-        """LOC empty should fail if strict, fill fields if not strict"""
-        data = ["abc   def", "3  n", "\t  abc   \txyz\n\n", "fgh   "]
-        l = LineOrientedConstructor()
-        result = l()
-        self.assertEqual(result, {})
-        result = l([])
-        self.assertEqual(result, {})
-        result = l(['   ', '\n\t   '])
-        self.assertEqual(result, {})
-        result = l(data)
-        self.assertEqual(result, {'abc': 'xyz', '3': 'n', 'fgh': None})
-
-    def test_full_LOC(self):
-        """LOC should behave as expected when initialized with rich data"""
-        data = ["abc\t def", " 3 \t n", "  abc   \txyz\n\n", "x\t5", "fgh   ",
-                "x\t3    "]
-
-        class rec(MappedRecord):
-            Required = {'abc': []}
-        maps = {'abc': list_adder, 'x': int_setter, 'fgh': bool_setter}
-        label_splitter = DelimitedSplitter('\t')
-        constructor = rec
-        strict = True
-        loc_bad = LineOrientedConstructor(data, label_splitter, maps,
-                                          constructor, strict)
-        self.assertRaises(FieldError, loc_bad)
-        strict = False
-        loc_good = LineOrientedConstructor(data, label_splitter, maps,
-                                           constructor, strict)
-        result = loc_good()
-        assert isinstance(result, rec)
-        self.assertEqual(result,
-                         {'abc': ['def', 'xyz'], '3': 'n',
-                          'fgh': False, 'x': 3})
-
-
-class fake_dict(dict):
-
-    """Test that constructors return the correct subclass"""
-    pass
-
-
-class FieldWrapperTests(TestCase):
-
-    """Tests of the FieldWrapper factory function"""
-
-    def test_default(self):
-        """Default FieldWrapper should wrap fields and labels"""
-        fields = list('abcde')
-        f = FieldWrapper(fields)
-        self.assertEqual(f(''), {})
-        self.assertEqual(f('xy za '), {'a': 'xy', 'b': 'za'})
-        self.assertEqual(f('1   2\t\t 3  \n4 5 6'),
-                         {'a': '1', 'b': '2', 'c': '3', 'd': '4', 'e': '5'})
-
-    def test_splitter(self):
-        """FieldWrapper with splitter should use that splitter"""
-        fields = ['label', 'count']
-        splitter = DelimitedSplitter(':', -1)
-        f = FieldWrapper(fields, splitter)
-        self.assertEqual(f(''), {})
-        self.assertEqual(f('nknasd:'), {'label': 'nknasd', 'count': ''})
-        self.assertEqual(
-            f('n:k:n:a:sd  '),
-            {'label': 'n:k:n:a',
-             'count': 'sd'})
-
-    def test_constructor(self):
-        """FieldWrapper with constructor should use that constructor"""
-        fields = list('abc')
-        f = FieldWrapper(fields, constructor=fake_dict)
-        self.assertEqual(f('x y'), {'a': 'x', 'b': 'y'})
-        assert isinstance(f('x y'), fake_dict)
-
-
-class StrictFieldWrapperTests(TestCase):
-
-    """Tests of the StrictFieldWrapper factory function"""
-
-    def test_default(self):
-        """Default StrictFieldWrapper should wrap fields if count correct"""
-        fields = list('abcde')
-        f = StrictFieldWrapper(fields)
-        self.assertEqual(f('1   2\t\t 3  \n4 5 '),
-                         {'a': '1', 'b': '2', 'c': '3', 'd': '4', 'e': '5'})
-        self.assertRaises(FieldError, f, '')
-        self.assertRaises(FieldError, f, 'xy za ')
-
-    def test_splitter(self):
-        """StrictFieldWrapper with splitter should use that splitter"""
-        fields = ['label', 'count']
-        splitter = DelimitedSplitter(':', -1)
-        f = StrictFieldWrapper(fields, splitter)
-        self.assertEqual(
-            f('n:k:n:a:sd  '),
-            {'label': 'n:k:n:a',
-             'count': 'sd'})
-        self.assertEqual(f('nknasd:'), {'label': 'nknasd', 'count': ''})
-        self.assertRaises(FieldError, f, '')
-
-    def test_constructor(self):
-        """StrictFieldWrapper with constructor should use that constructor"""
-        fields = list('ab')
-        f = StrictFieldWrapper(fields, constructor=fake_dict)
-        self.assertEqual(f('x y'), {'a': 'x', 'b': 'y'})
-        assert isinstance(f('x y'), fake_dict)
-
-
-class FieldMorpherTests(TestCase):
-
-    """Tests of the FieldMorpher class."""
-
-    def test_default(self):
-        """FieldMorpher default should use correct constructors"""
-        fm = FieldMorpher({'a': int, 'b': str})
-        self.assertEqual(fm({'a': '3', 'b': 456}), {'a': 3, 'b': '456'})
-
-    def test_default_error(self):
-        """FieldMorpher default should raise FieldError on unknown fields"""
-        fm = FieldMorpher({'a': int, 'b': str})
-        self.assertRaises(FieldError, fm, {'a': '3', 'b': 456, 'c': '4'})
-
-    def test_altered_default(self):
-        """FieldMorpher with default set should apply it"""
-        def func(x, y):
-            return str(x), float(y) - 0.5
-
-        fm = FieldMorpher({'3': str, 4: int}, func)
-        # check that recognized values aren't tampered with
-        self.assertEqual(fm({3: 3, 4: '4'}), {'3': '3', 4: 4})
-        # check that unrecognized values get the appropriate conversion
-        self.assertEqual(fm({3: 3, 5: '5'}), {'3': '3', '5': 4.5})
-
-if __name__ == '__main__':
-    main()
diff --git a/skbio/parse/tests/test_record_finder.py b/skbio/parse/tests/test_record_finder.py
deleted file mode 100644
index 77c543c..0000000
--- a/skbio/parse/tests/test_record_finder.py
+++ /dev/null
@@ -1,257 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from unittest import TestCase, main
-
-from skbio.io import RecordError
-from skbio.parse.record_finder import (DelimitedRecordFinder,
-                                       LabeledRecordFinder, LineGrouper,
-                                       TailedRecordFinder)
-
-
-class TailedRecordFinderTests(TestCase):
-    def setUp(self):
-        self.endswith_period = lambda x: x.endswith('.')
-        self.period_tail_finder = TailedRecordFinder(self.endswith_period)
-
-    def test_parsers(self):
-        lines = '>abc\ndef\nz.\n>efg\nz.'.split()
-        fl = self.period_tail_finder
-        self.assertEqual(list(fl(lines)),
-                         [['>abc', 'def', 'z.'], ['>efg', 'z.']])
-
-    def test_parsers_empty(self):
-        fl = self.period_tail_finder
-        self.assertEqual(list(fl(['  ', '\n'])), [])
-        self.assertEqual(list(fl([])), [])
-
-    def test_parsers_strip(self):
-        fl = self.period_tail_finder
-        lines = '>abc  \n \t def\n  z. \t\n>efg \nz.'.split('\n')
-        self.assertEqual(list(fl(lines)),
-                         [['>abc', ' \t def', '  z.'], ['>efg', 'z.']])
-
-    def test_parsers_leftover(self):
-        f = self.period_tail_finder
-        good = ['abc  \n',
-                'def\n',
-                '.\n',
-                'ghi \n',
-                'j.',
-                ]
-        blank = ['', '   ', '\t    \t\n\n']
-        bad = ['abc']
-
-        result = [['abc', 'def', '.'], ['ghi', 'j.']]
-
-        self.assertEqual(list(f(good)), result)
-        self.assertEqual(list(f(good + blank)), result)
-        self.assertRaises(RecordError, list, f(good + bad))
-
-        f2 = TailedRecordFinder(self.endswith_period, strict=False)
-        self.assertEqual(list(f2(good + bad)), result + [['abc']])
-
-    def test_parsers_ignore(self):
-        def never(line):
-            return False
-
-        def ignore_labels(line):
-            return (not line) or line.isspace() or line.startswith('#')
-
-        lines = ['abc', '\n', '1.', 'def', '#ignore', '2.']
-        self.assertEqual(list(TailedRecordFinder(self.endswith_period)(lines)),
-                         [['abc', '1.'], ['def', '#ignore', '2.']])
-        self.assertEqual(list(TailedRecordFinder(self.endswith_period,
-                                                 ignore=never)(lines)),
-                         [['abc', '', '1.'], ['def', '#ignore', '2.']])
-        self.assertEqual(list(TailedRecordFinder(self.endswith_period,
-                                                 ignore=ignore_labels)(lines)),
-                         [['abc', '1.'], ['def', '2.']])
-
-
-class DelimitedRecordFinderTests(TestCase):
-    def test_parsers(self):
-        lines = 'abc\ndef\n//\nefg\n//'.split()
-        self.assertEqual(list(DelimitedRecordFinder('//')(lines)),
-                         [['abc', 'def', '//'], ['efg', '//']])
-        self.assertEqual(list(DelimitedRecordFinder('//', keep_delimiter=False)
-                              (lines)),
-                         [['abc', 'def'], ['efg']])
-
-    def test_parsers_empty(self):
-        self.assertEqual(list(DelimitedRecordFinder('//')(['  ', '\n'])), [])
-        self.assertEqual(list(DelimitedRecordFinder('//')([])), [])
-
-    def test_parsers_strip(self):
-        lines = '  \t   abc  \n \t   def\n  // \t\n\t\t efg \n//'.split('\n')
-        self.assertEqual(list(DelimitedRecordFinder('//')(lines)),
-                         [['abc', 'def', '//'], ['efg', '//']])
-
-    def test_parsers_error(self):
-        good = ['  \t   abc  \n',
-                '\t   def\n',
-                '// \t\n',
-                '\t\n',
-                '\t efg \n',
-                '\t\t//\n',
-                ]
-        blank = ['', '   ', '\t    \t\n\n']
-        bad = ['abc']
-
-        result = [['abc', 'def', '//'], ['efg', '//']]
-        r = DelimitedRecordFinder('//')
-
-        self.assertEqual(list(r(good)), result)
-        self.assertEqual(list(r(good + blank)), result)
-        try:
-            list(r(good + bad))
-        except RecordError:
-            pass
-        else:
-            raise AssertionError("Parser failed to raise error on bad data")
-
-        r = DelimitedRecordFinder('//', strict=False)
-        self.assertEqual(list(r(good + bad)), result + [['abc']])
-
-    def test_parsers_ignore(self):
-        def never(line):
-            return False
-
-        def ignore_labels(line):
-            return (not line) or line.isspace() or line.startswith('#')
-
-        lines = ['>abc', '\n', '1', '$$', '>def', '#ignore', '2', '$$']
-        self.assertEqual(list(DelimitedRecordFinder('$$')(lines)),
-                         [['>abc', '1', '$$'], ['>def', '#ignore', '2', '$$']])
-        self.assertEqual(list(DelimitedRecordFinder('$$',
-                                                    ignore=never)(lines)),
-                         [['>abc', '', '1', '$$'],
-                          ['>def', '#ignore', '2', '$$']])
-        self.assertEqual(
-            list(DelimitedRecordFinder('$$', ignore=ignore_labels)(lines)),
-            [['>abc', '1', '$$'], ['>def', '2', '$$']])
-
-
-class LabeledRecordFinderTests(TestCase):
-    def setUp(self):
-        self.FastaLike = LabeledRecordFinder(lambda x: x.startswith('>'))
-
-    def test_parsers(self):
-        lines = '>abc\ndef\n//\n>efg\n//'.split()
-        fl = self.FastaLike
-        self.assertEqual(list(fl(lines)),
-                         [['>abc', 'def', '//'], ['>efg', '//']])
-
-    def test_parsers_empty(self):
-        fl = self.FastaLike
-        self.assertEqual(list(fl(['  ', '\n'])), [])
-        self.assertEqual(list(fl([])), [])
-
-    def test_parsers_strip(self):
-        fl = self.FastaLike
-        lines = '  \t   >abc  \n \t   def\n  // \t\n\t\t >efg \n//'.split('\n')
-        self.assertEqual(list(fl(lines)),
-                         [['>abc', 'def', '//'], ['>efg', '//']])
-
-    def test_parsers_leftover(self):
-        fl = self.FastaLike
-        good = ['  \t   >abc  \n',
-                '\t   def\n',
-                '\t\n',
-                '\t >efg \n',
-                'ghi',
-                ]
-        blank = ['', '   ', '\t    \t\n\n']
-        bad = ['>abc']
-
-        result = [['>abc', 'def'], ['>efg', 'ghi']]
-
-        self.assertEqual(list(fl(good)), result)
-        self.assertEqual(list(fl(good + blank)), result)
-        self.assertEqual(list(fl(good + bad)), result + [['>abc']])
-
-    def test_parsers_ignore(self):
-        def never(line):
-            return False
-
-        def ignore_labels(line):
-            return (not line) or line.isspace() or line.startswith('#')
-
-        def is_start(line):
-            return line.startswith('>')
-
-        lines = ['>abc', '\n', '1', '>def', '#ignore', '2']
-        self.assertEqual(list(LabeledRecordFinder(is_start)(lines)),
-                         [['>abc', '1'], ['>def', '#ignore', '2']])
-        self.assertEqual(list(LabeledRecordFinder(is_start,
-                                                  ignore=never)(lines)),
-                         [['>abc', '', '1'], ['>def', '#ignore', '2']])
-        self.assertEqual(list(LabeledRecordFinder(is_start,
-                                                  ignore=ignore_labels)(
-            lines)),
-            [['>abc', '1'], ['>def', '2']])
-
-    def test_constructor_is_none(self):
-        lrf = LabeledRecordFinder(lambda x: x.strip().startswith('>'),
-                                  constructor=None)
-        lines = '  \t   >abc  \n \t   def\n  // \t\n\t\t >efg \n//'.split('\n')
-
-        obs = list(lrf(lines))
-        exp = [['  \t   >abc  ', ' \t   def', '  // \t'], ['\t\t >efg ', '//']]
-        self.assertEqual(obs, exp)
-
-
-class LineGrouperTests(TestCase):
-    def test_parser(self):
-        good = ['  \t   >abc  \n',
-                '\t   def\n',
-                '\t\n',
-                '\t >efg \n',
-                'ghi',
-                ]
-        c = LineGrouper(2)
-        self.assertEqual(list(c(good)), [['>abc', 'def'], ['>efg', 'ghi']])
-        c = LineGrouper(1)
-        self.assertEqual(list(c(good)), [['>abc'], ['def'], ['>efg'], ['ghi']])
-        c = LineGrouper(4)
-        self.assertEqual(list(c(good)), [['>abc', 'def', '>efg', 'ghi']])
-        # shouldn't work if not evenly divisible
-        c = LineGrouper(3)
-        self.assertRaises(RecordError, list, c(good))
-
-    def test_parser_ignore(self):
-        def never(line):
-            return False
-
-        def ignore_labels(line):
-            return (not line) or line.isspace() or line.startswith('#')
-
-        lines = ['abc', '\n', '1', 'def', '#ignore', '2']
-        self.assertEqual(list(LineGrouper(1)(lines)),
-                         [['abc'], ['1'], ['def'], ['#ignore'], ['2']])
-        self.assertEqual(list(LineGrouper(1, ignore=never)(lines)),
-                         [[i.strip()] for i in lines])
-        self.assertEqual(list(LineGrouper(2, ignore=ignore_labels)(lines)),
-                         [['abc', '1'], ['def', '2']])
-
-    def test_constructor_is_none(self):
-        lines = ['abc', ' def   ', ' ghi', 'jkl  ']
-
-        # should strip
-        exp = [['abc', 'def'], ['ghi', 'jkl']]
-        obs = list(LineGrouper(2)(lines))
-        self.assertEqual(obs, exp)
-
-        # should not strip
-        exp = [['abc', ' def   '], [' ghi', 'jkl  ']]
-        obs = list(LineGrouper(2, constructor=None)(lines))
-        self.assertEqual(obs, exp)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/skbio/sequence/__init__.py b/skbio/sequence/__init__.py
index 4a9427d..77b0e43 100644
--- a/skbio/sequence/__init__.py
+++ b/skbio/sequence/__init__.py
@@ -1,16 +1,26 @@
 r"""
-Biological sequences (:mod:`skbio.sequence`)
-============================================
+Sequences (:mod:`skbio.sequence`)
+=================================
 
 .. currentmodule:: skbio.sequence
 
-This module provides functionality for working with biological sequences,
-including generic sequences, nucleotide sequences, DNA sequences, and RNA
-sequences. Class methods and attributes are also available to obtain valid
-character sets, complement maps for different sequence types, and for
-obtaining degenerate character definitions. Additionally this module defines the
-``GeneticCode`` class, which represents an immutable object that translates RNA
-or DNA strings to amino acid sequences.
+This module provides classes for storing and working with biological sequences,
+including generic sequences which have no restrictions on which characters can
+be included, and sequences based on IUPAC-defined sets of allowed characters
+(including degenerate characters), including ``DNA``, ``RNA`` and ``Protein``
+sequences. Common operations are defined as methods, for example computing the
+reverse complement of a DNA sequence, or searching for N-glycosylation motifs
+in ``Protein`` sequences. Class attributes are available to obtain valid
+character sets, complement maps for different sequence types, and for obtaining
+degenerate character definitions. Additionally this module defines the
+``GeneticCode`` class, which represents an immutable object that translates DNA
+or RNA sequences into protein sequences.
+
+The primary information stored for each different type of sequence object is
+the underlying sequence data itself. This is stored as an immutable NumPy
+array. Additionally, each type of sequence may include optional metadata
+and positional metadata. Note that metadata and positional metadata are
+mutable.
 
 Classes
 -------
@@ -18,126 +28,253 @@ Classes
 .. autosummary::
    :toctree: generated/
 
-   BiologicalSequence
-   NucleotideSequence
-   DNASequence
-   RNASequence
-   ProteinSequence
+   Sequence
+   DNA
+   RNA
+   Protein
    GeneticCode
 
-Functions
----------
-
-.. autosummary::
-   :toctree: generated/
-
-   genetic_code
-
-Exceptions
-----------
-
-.. autosummary::
-   :toctree: generated/
-
-   BiologicalSequenceError
-   GeneticCodeError
-   GeneticCodeInitError
-   InvalidCodonError
-
 Examples
 --------
->>> from skbio.sequence import DNASequence, RNASequence
+>>> from skbio import DNA, RNA
 
-New sequences are created with optional id and description fields.
+New sequences are created with optional metadata and positional metadata
+fields. Metadata is stored as a Python dict, while positional metadata
+becomes a Pandas DataFrame.
 
->>> d1 = DNASequence('ACC--G-GGTA..')
->>> d1 = DNASequence('ACC--G-GGTA..',id="seq1")
->>> d1 = DNASequence('ACC--G-GGTA..',id="seq1",description="GFP")
+>>> d = DNA('ACCGGGTA')
+>>> d = DNA('ACCGGGTA', metadata={'id':"my-sequence", 'description':"GFP"},
+...          positional_metadata={'quality':[22, 25, 22, 18, 23, 25, 25, 25]})
+>>> d = DNA('ACCGGTA', metadata={'id':"my-sequence"})
 
 New sequences can also be created from existing sequences, for example as their
 reverse complement or degapped (i.e., unaligned) version.
 
+>>> d1 = DNA('.ACC--GGG-TA...', metadata={'id':'my-sequence'})
 >>> d2 = d1.degap()
->>> d1
-<DNASequence: ACC--G-GGT... (length: 13)>
 >>> d2
-<DNASequence: ACCGGGTA (length: 8)>
+DNA
+-----------------------------
+Metadata:
+    'id': 'my-sequence'
+Stats:
+    length: 8
+    has gaps: False
+    has degenerates: False
+    has non-degenerates: True
+    GC-content: 62.50%
+-----------------------------
+0 ACCGGGTA
 >>> d3 = d2.reverse_complement()
 >>> d3
-<DNASequence: TACCCGGT (length: 8)>
-
-It's also straight-forward to compute distances between sequences (optionally
-using user-defined distance metrics, default is Hamming distance) for use in
+DNA
+-----------------------------
+Metadata:
+    'id': 'my-sequence'
+Stats:
+    length: 8
+    has gaps: False
+    has degenerates: False
+    has non-degenerates: True
+    GC-content: 62.50%
+-----------------------------
+0 TACCCGGT
+
+It's also straightforward to compute distances between sequences (optionally
+using user-defined distance metrics, the default is Hamming distance which
+requires that the sequences being compared are the same length) for use in
 sequence clustering, phylogenetic reconstruction, etc.
 
->>> d4 = DNASequence('GACCCGCT')
->>> d5 = DNASequence('GACCCCCT')
->>> d3.distance(d4)
-0.25
->>> d3.distance(d5)
-0.375
+>>> r1 = RNA('GACCCGCUUU')
+>>> r2 = RNA('GCCCCCCUUU')
+>>> r1.distance(r2)
+0.2
+
+Similarly, you can calculate the percent (dis)similarity between a pair of
+aligned sequences.
+
+>>> r3 = RNA('ACCGUUAGUC')
+>>> r4 = RNA('ACGGGU--UC')
+>>> r3.match_frequency(r4, relative=True)
+0.6
+>>> r3.mismatch_frequency(r4, relative=True)
+0.4
+
+Sequences can be searched for known motif types. This returns the slices that
+describe the matches.
+
+>>> r5 = RNA('AGG-GGACUGAA')
+>>> for motif in r5.find_motifs('purine-run', min_length=2):
+...     motif
+slice(0, 3, None)
+slice(4, 7, None)
+slice(9, 12, None)
+
+Those slices can be used to extract the relevant subsequences.
+
+>>> for motif in r5.find_motifs('purine-run', min_length=2):
+...     r5[motif]
+...     print('')
+RNA
+-----------------------------
+Stats:
+    length: 3
+    has gaps: False
+    has degenerates: False
+    has non-degenerates: True
+    GC-content: 66.67%
+-----------------------------
+0 AGG
+<BLANKLINE>
+RNA
+-----------------------------
+Stats:
+    length: 3
+    has gaps: False
+    has degenerates: False
+    has non-degenerates: True
+    GC-content: 66.67%
+-----------------------------
+0 GGA
+<BLANKLINE>
+RNA
+-----------------------------
+Stats:
+    length: 3
+    has gaps: False
+    has degenerates: False
+    has non-degenerates: True
+    GC-content: 33.33%
+-----------------------------
+0 GAA
+<BLANKLINE>
+
+And gaps or other features can be ignored while searching, as these may disrupt
+otherwise meaningful motifs.
+
+>>> for motif in r5.find_motifs('purine-run', min_length=2, ignore=r5.gaps()):
+...     r5[motif]
+...     print('')
+RNA
+-----------------------------
+Stats:
+    length: 7
+    has gaps: True
+    has degenerates: False
+    has non-degenerates: True
+    GC-content: 66.67%
+-----------------------------
+0 AGG-GGA
+<BLANKLINE>
+RNA
+-----------------------------
+Stats:
+    length: 3
+    has gaps: False
+    has degenerates: False
+    has non-degenerates: True
+    GC-content: 33.33%
+-----------------------------
+0 GAA
+<BLANKLINE>
+
+In the above example, removing gaps from the resulting motif matches is easily
+achieved, as the sliced matches themselves are sequences of the same type as
+the input.
+
+>>> for motif in r5.find_motifs('purine-run', min_length=2, ignore=r5.gaps()):
+...     r5[motif].degap()
+...     print('')
+RNA
+-----------------------------
+Stats:
+    length: 6
+    has gaps: False
+    has degenerates: False
+    has non-degenerates: True
+    GC-content: 66.67%
+-----------------------------
+0 AGGGGA
+<BLANKLINE>
+RNA
+-----------------------------
+Stats:
+    length: 3
+    has gaps: False
+    has degenerates: False
+    has non-degenerates: True
+    GC-content: 33.33%
+-----------------------------
+0 GAA
+<BLANKLINE>
+
+Sequences can similarly be searched for arbitrary patterns using regular
+expressions.
+
+>>> for match in r5.find_with_regex('(G+AC[UT])'):
+...     match
+slice(4, 9, None)
+
+DNA can be transcribed to RNA:
+
+>>> dna = DNA('ATGTGTATTTGA')
+>>> rna = dna.transcribe()
+>>> rna
+RNA
+-----------------------------
+Stats:
+    length: 12
+    has gaps: False
+    has degenerates: False
+    has non-degenerates: True
+    GC-content: 25.00%
+-----------------------------
+0 AUGUGUAUUU GA
+
+Both DNA and RNA can be translated into a protein sequence. For example, let's
+translate our DNA and RNA sequences using NCBI's standard genetic code (table
+ID 1, the default genetic code in scikit-bio):
+
+>>> protein_from_dna = dna.translate()
+>>> protein_from_dna
+Protein
+-----------------------------
+Stats:
+    length: 4
+    has gaps: False
+    has degenerates: False
+    has non-degenerates: True
+    has stops: True
+-----------------------------
+0 MCI*
+>>> protein_from_rna = rna.translate()
+>>> protein_from_rna
+Protein
+-----------------------------
+Stats:
+    length: 4
+    has gaps: False
+    has degenerates: False
+    has non-degenerates: True
+    has stops: True
+-----------------------------
+0 MCI*
+
+The two translations are equivalent:
+
+>>> protein_from_dna == protein_from_rna
+True
 
 Class-level methods contain information about the molecule types.
 
->>> DNASequence.iupac_degeneracies()['B']
+>>> DNA.degenerate_map['B']
 set(['C', 'T', 'G'])
 
->>> RNASequence.iupac_degeneracies()['B']
+>>> RNA.degenerate_map['B']
 set(['C', 'U', 'G'])
 
->>> DNASequence.is_gap('-')
-True
-
-Creating and using a ``GeneticCode`` object
-
->>> from skbio.sequence import genetic_code
->>> from pprint import pprint
->>> sgc = genetic_code(1)
->>> sgc
-GeneticCode(FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG)
->>> sgc['UUU'] == 'F'
-True
->>> sgc['TTT'] == 'F'
-True
->>> sgc['F'] == ['TTT', 'TTC']          #in arbitrary order
-True
->>> sgc['*'] == ['TAA', 'TAG', 'TGA']   #in arbitrary order
-True
-
-Retrieving the anticodons of the object
-
->>> pprint(sgc.anticodons)
-{'*': ['TTA', 'CTA', 'TCA'],
- 'A': ['AGC', 'GGC', 'TGC', 'CGC'],
- 'C': ['ACA', 'GCA'],
- 'D': ['ATC', 'GTC'],
- 'E': ['TTC', 'CTC'],
- 'F': ['AAA', 'GAA'],
- 'G': ['ACC', 'GCC', 'TCC', 'CCC'],
- 'H': ['ATG', 'GTG'],
- 'I': ['AAT', 'GAT', 'TAT'],
- 'K': ['TTT', 'CTT'],
- 'L': ['TAA', 'CAA', 'AAG', 'GAG', 'TAG', 'CAG'],
- 'M': ['CAT'],
- 'N': ['ATT', 'GTT'],
- 'P': ['AGG', 'GGG', 'TGG', 'CGG'],
- 'Q': ['TTG', 'CTG'],
- 'R': ['ACG', 'GCG', 'TCG', 'CCG', 'TCT', 'CCT'],
- 'S': ['AGA', 'GGA', 'TGA', 'CGA', 'ACT', 'GCT'],
- 'T': ['AGT', 'GGT', 'TGT', 'CGT'],
- 'V': ['AAC', 'GAC', 'TAC', 'CAC'],
- 'W': ['CCA'],
- 'Y': ['ATA', 'GTA']}
-
-NucleotideSequences can be translated using a ``GeneticCode`` object.
-
->>> d6 = DNASequence('ATGTCTAAATGA')
->>> from skbio.sequence import genetic_code
->>> gc = genetic_code(11)
->>> gc.translate(d6)
-<ProteinSequence: MSK* (length: 4)>
-
 """
+
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -146,18 +283,16 @@ NucleotideSequences can be translated using a ``GeneticCode`` object.
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from numpy.testing import Tester
+from __future__ import absolute_import, division, print_function
+
+from skbio.util import TestRunner
 
-from ._exception import (BiologicalSequenceError, GeneticCodeError,
-                         GeneticCodeInitError, InvalidCodonError)
-from ._sequence import (BiologicalSequence, NucleotideSequence, DNASequence,
-                        RNASequence, ProteinSequence, DNA, RNA, Protein)
-from ._genetic_code import GeneticCode, genetic_code
+from ._sequence import Sequence
+from ._protein import Protein
+from ._dna import DNA
+from ._rna import RNA
+from ._genetic_code import GeneticCode
 
-__all__ = ['BiologicalSequenceError', 'GeneticCodeError',
-           'GeneticCodeInitError', 'InvalidCodonError', 'BiologicalSequence',
-           'NucleotideSequence', 'DNASequence', 'RNASequence',
-           'ProteinSequence', 'DNA', 'RNA', 'Protein', 'GeneticCode',
-           'genetic_code']
+__all__ = ['Sequence', 'Protein', 'DNA', 'RNA', 'GeneticCode']
 
-test = Tester().test
+test = TestRunner(__file__).test
diff --git a/skbio/sequence/_base.py b/skbio/sequence/_base.py
new file mode 100644
index 0000000..9e71c2b
--- /dev/null
+++ b/skbio/sequence/_base.py
@@ -0,0 +1,43 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+
+class ElasticLines(object):
+    """Store blocks of content separated by dashed lines.
+
+    Each dashed line (separator) is as long as the longest content
+    (non-separator) line.
+
+    """
+
+    def __init__(self):
+        self._lines = []
+        self._separator_idxs = []
+        self._max_line_len = -1
+
+    def add_line(self, line):
+        line_len = len(line)
+        if line_len > self._max_line_len:
+            self._max_line_len = line_len
+        self._lines.append(line)
+
+    def add_lines(self, lines):
+        for line in lines:
+            self.add_line(line)
+
+    def add_separator(self):
+        self._lines.append(None)
+        self._separator_idxs.append(len(self._lines) - 1)
+
+    def to_str(self):
+        separator = '-' * self._max_line_len
+        for idx in self._separator_idxs:
+            self._lines[idx] = separator
+        return '\n'.join(self._lines)
diff --git a/skbio/sequence/_dna.py b/skbio/sequence/_dna.py
new file mode 100644
index 0000000..d4abd77
--- /dev/null
+++ b/skbio/sequence/_dna.py
@@ -0,0 +1,410 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from skbio.util._decorator import classproperty, overrides
+from skbio.util._decorator import stable
+from ._rna import RNA
+from ._nucleotide_mixin import NucleotideMixin, _motifs as _parent_motifs
+from ._iupac_sequence import IUPACSequence
+
+
+class DNA(IUPACSequence, NucleotideMixin):
+    """Store DNA sequence data and optional associated metadata.
+
+    Only characters in the IUPAC DNA character set [1]_ are supported.
+
+    Parameters
+    ----------
+    sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
+        Characters representing the DNA sequence itself.
+    metadata : dict, optional
+        Arbitrary metadata which applies to the entire sequence.
+    positional_metadata : Pandas DataFrame consumable, optional
+        Arbitrary per-character metadata. For example, quality data from
+        sequencing reads. Must be able to be passed directly to the Pandas
+        DataFrame constructor.
+    validate : bool, optional
+        If ``True``, validation will be performed to ensure that all sequence
+        characters are in the IUPAC DNA character set. If ``False``, validation
+        will not be performed. Turning off validation will improve runtime
+        performance. If invalid characters are present, however, there is
+        **no guarantee that operations performed on the resulting object will
+        work or behave as expected.** Only turn off validation if you are
+        certain that the sequence characters are valid. To store sequence data
+        that is not IUPAC-compliant, use ``Sequence``.
+    lowercase : bool or str, optional
+        If ``True``, lowercase sequence characters will be converted to
+        uppercase characters in order to be valid IUPAC DNA characters. If
+        ``False``, no characters will be converted. If a str, it will be
+        treated as a key into the positional metadata of the object. All
+        lowercase characters will be converted to uppercase, and a ``True``
+        value will be stored in a boolean array in the positional metadata
+        under the key.
+
+    Attributes
+    ----------
+    values
+    metadata
+    positional_metadata
+    alphabet
+    gap_chars
+    nondegenerate_chars
+    degenerate_chars
+    degenerate_map
+    complement_map
+
+    See Also
+    --------
+    RNA
+
+    References
+    ----------
+    .. [1] Nomenclature for incompletely specified bases in nucleic acid
+       sequences: recommendations 1984.
+       Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
+       A Cornish-Bowden
+
+    Examples
+    --------
+    >>> from skbio import DNA
+    >>> DNA('ACCGAAT')
+    DNA
+    -----------------------------
+    Stats:
+        length: 7
+        has gaps: False
+        has degenerates: False
+        has non-degenerates: True
+        GC-content: 42.86%
+    -----------------------------
+    0 ACCGAAT
+
+    Convert lowercase characters to uppercase:
+
+    >>> DNA('AcCGaaT', lowercase=True)
+    DNA
+    -----------------------------
+    Stats:
+        length: 7
+        has gaps: False
+        has degenerates: False
+        has non-degenerates: True
+        GC-content: 42.86%
+    -----------------------------
+    0 ACCGAAT
+
+    """
+
+    @classproperty
+    @stable(as_of="0.4.0")
+    @overrides(NucleotideMixin)
+    def complement_map(cls):
+        comp_map = {
+            'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y',
+            'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H',
+            'H': 'D', 'V': 'B', 'N': 'N'
+        }
+
+        comp_map.update({c: c for c in cls.gap_chars})
+        return comp_map
+
+    @classproperty
+    @stable(as_of="0.4.0")
+    @overrides(IUPACSequence)
+    def nondegenerate_chars(cls):
+        return set("ACGT")
+
+    @classproperty
+    @stable(as_of="0.4.0")
+    @overrides(IUPACSequence)
+    def degenerate_map(cls):
+        return {
+            "R": set("AG"), "Y": set("CT"), "M": set("AC"), "K": set("TG"),
+            "W": set("AT"), "S": set("GC"), "B": set("CGT"), "D": set("AGT"),
+            "H": set("ACT"), "V": set("ACG"), "N": set("ACGT")
+        }
+
+    @property
+    def _motifs(self):
+        return _motifs
+
+    @stable(as_of="0.4.0")
+    def transcribe(self):
+        """Transcribe DNA into RNA.
+
+        DNA sequence is assumed to be the coding strand. Thymine (T) is
+        replaced with uracil (U) in the transcribed sequence.
+
+        Returns
+        -------
+        RNA
+            Transcribed sequence.
+
+        See Also
+        --------
+        translate
+        translate_six_frames
+
+        Notes
+        -----
+        DNA sequence's metadata and positional metadata are included in the
+        transcribed RNA sequence.
+
+        Examples
+        --------
+        Transcribe DNA into RNA:
+
+        >>> from skbio import DNA
+        >>> dna = DNA('TAACGTTA')
+        >>> dna
+        DNA
+        -----------------------------
+        Stats:
+            length: 8
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 25.00%
+        -----------------------------
+        0 TAACGTTA
+        >>> dna.transcribe()
+        RNA
+        -----------------------------
+        Stats:
+            length: 8
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 25.00%
+        -----------------------------
+        0 UAACGUUA
+
+        """
+        seq = self._string.replace(b'T', b'U')
+
+        metadata = None
+        if self.has_metadata():
+            metadata = self.metadata
+
+        positional_metadata = None
+        if self.has_positional_metadata():
+            positional_metadata = self.positional_metadata
+
+        # turn off validation because `seq` is guaranteed to be valid
+        return RNA(seq, metadata=metadata,
+                   positional_metadata=positional_metadata, validate=False)
+
+    @stable(as_of="0.4.0")
+    def translate(self, *args, **kwargs):
+        """Translate DNA sequence into protein sequence.
+
+        DNA sequence is assumed to be the coding strand. DNA sequence is first
+        transcribed into RNA and then translated into protein.
+
+        Parameters
+        ----------
+        args : tuple
+            Positional arguments accepted by ``RNA.translate``.
+        kwargs : dict
+            Keyword arguments accepted by ``RNA.translate``.
+
+        Returns
+        -------
+        Protein
+            Translated sequence.
+
+        See Also
+        --------
+        RNA.translate
+        translate_six_frames
+        transcribe
+
+        Notes
+        -----
+        DNA sequence's metadata are included in the translated protein
+        sequence. Positional metadata are not included.
+
+        Examples
+        --------
+        Translate DNA into protein using NCBI's standard genetic code (table ID
+        1, the default genetic code in scikit-bio):
+
+        >>> from skbio import DNA
+        >>> dna = DNA('ATGCCACTTTAA')
+        >>> dna.translate()
+        Protein
+        -----------------------------
+        Stats:
+            length: 4
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            has stops: True
+        -----------------------------
+        0 MPL*
+
+        Translate the same DNA sequence using a different NCBI genetic code
+        (table ID 3, the yeast mitochondrial code) and specify that translation
+        must terminate at the first stop codon:
+
+        >>> dna.translate(3, stop='require')
+        Protein
+        -----------------------------
+        Stats:
+            length: 3
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            has stops: False
+        -----------------------------
+        0 MPT
+
+        """
+        return self.transcribe().translate(*args, **kwargs)
+
+    @stable(as_of="0.4.0")
+    def translate_six_frames(self, *args, **kwargs):
+        """Translate DNA into protein using six possible reading frames.
+
+        DNA sequence is assumed to be the coding strand. DNA sequence is first
+        transcribed into RNA and then translated into protein. The six possible
+        reading frames are:
+
+        * 1 (forward)
+        * 2 (forward)
+        * 3 (forward)
+        * -1 (reverse)
+        * -2 (reverse)
+        * -3 (reverse)
+
+        Translated sequences are yielded in this order.
+
+        Parameters
+        ----------
+        args : tuple
+            Positional arguments accepted by ``RNA.translate_six_frames``.
+        kwargs : dict
+            Keyword arguments accepted by ``RNA.translate_six_frames``.
+
+        Yields
+        ------
+        Protein
+            Translated sequence in the current reading frame.
+
+        See Also
+        --------
+        RNA.translate_six_frames
+        translate
+        transcribe
+
+        Notes
+        -----
+        This method is faster than (and equivalent to) performing six
+        independent translations using, for example:
+
+        ``(seq.translate(reading_frame=rf)
+        for rf in GeneticCode.reading_frames)``
+
+        DNA sequence's metadata are included in each translated protein
+        sequence. Positional metadata are not included.
+
+        Examples
+        --------
+        Translate DNA into protein using the six possible reading frames and
+        NCBI's standard genetic code (table ID 1, the default genetic code in
+        scikit-bio):
+
+        >>> from skbio import DNA
+        >>> dna = DNA('ATGCCACTTTAA')
+        >>> for protein in dna.translate_six_frames():
+        ...     protein
+        ...     print('')
+        Protein
+        -----------------------------
+        Stats:
+            length: 4
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            has stops: True
+        -----------------------------
+        0 MPL*
+        <BLANKLINE>
+        Protein
+        -----------------------------
+        Stats:
+            length: 3
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            has stops: False
+        -----------------------------
+        0 CHF
+        <BLANKLINE>
+        Protein
+        -----------------------------
+        Stats:
+            length: 3
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            has stops: False
+        -----------------------------
+        0 ATL
+        <BLANKLINE>
+        Protein
+        -----------------------------
+        Stats:
+            length: 4
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            has stops: False
+        -----------------------------
+        0 LKWH
+        <BLANKLINE>
+        Protein
+        -----------------------------
+        Stats:
+            length: 3
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            has stops: True
+        -----------------------------
+        0 *SG
+        <BLANKLINE>
+        Protein
+        -----------------------------
+        Stats:
+            length: 3
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            has stops: False
+        -----------------------------
+        0 KVA
+        <BLANKLINE>
+
+        """
+        return self.transcribe().translate_six_frames(*args, **kwargs)
+
+    @overrides(IUPACSequence)
+    def _repr_stats(self):
+        """Define custom statistics to display in the sequence's repr."""
+        stats = super(DNA, self)._repr_stats()
+        stats.append(('GC-content', '{:.2%}'.format(self.gc_content())))
+        return stats
+
+
+_motifs = _parent_motifs.copy()
+
+# Leave this at the bottom
+_motifs.interpolate(DNA, "find_motifs")
diff --git a/skbio/sequence/_exception.py b/skbio/sequence/_exception.py
deleted file mode 100644
index 259b15c..0000000
--- a/skbio/sequence/_exception.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from __future__ import absolute_import, division, print_function
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2013--, scikit-bio development team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-
-class BiologicalSequenceError(Exception):
-    """General error for biological sequence validation failures."""
-    pass
-
-
-class GeneticCodeError(Exception):
-    """Base class exception used by the GeneticCode class"""
-    pass
-
-
-class GeneticCodeInitError(ValueError, GeneticCodeError):
-    """Exception raised by the GeneticCode class upon a bad initialization"""
-    pass
-
-
-class InvalidCodonError(KeyError, GeneticCodeError):
-    """Exception raised by the GeneticCode class if __getitem__ fails"""
-    pass
diff --git a/skbio/sequence/_genetic_code.py b/skbio/sequence/_genetic_code.py
index 9794c1c..cd9950f 100644
--- a/skbio/sequence/_genetic_code.py
+++ b/skbio/sequence/_genetic_code.py
@@ -6,615 +6,833 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-import re
+from __future__ import absolute_import, division, print_function
 
-from collections import defaultdict
+import numpy as np
+from future.builtins import range
 
+from skbio.util._decorator import classproperty, stable
 from skbio._base import SkbioObject
-from skbio.sequence import Protein, InvalidCodonError, GeneticCodeInitError
+from skbio.sequence import Protein, RNA
+from skbio.sequence._base import ElasticLines
 
-# py3k compatibility
-try:
-    from string import maketrans
-except ImportError:
-    maketrans = str.maketrans
 
-_dna_trans = maketrans('TCAG', 'AGTC')
+class GeneticCode(SkbioObject):
+    """Genetic code for translating codons to amino acids.
 
+    Parameters
+    ----------
+    amino_acids : consumable by ``skbio.Protein`` constructor
+        64-character vector containing IUPAC amino acid characters. The order
+        of the amino acids should correspond to NCBI's codon order (see *Notes*
+        section below). `amino_acids` is the "AAs" field in NCBI's genetic
+        code format [1]_.
+    starts : consumable by ``skbio.Protein`` constructor
+        64-character vector containing only M and - characters, with start
+        codons indicated by M. The order of the amino acids should correspond
+        to NCBI's codon order (see *Notes* section below). `starts` is the
+        "Starts" field in NCBI's genetic code format [1]_.
+    name : str, optional
+        Genetic code name. This is simply metadata and does not affect the
+        functionality of the genetic code itself.
 
-def _simple_rc(seq):
-    """simple reverse-complement: works only on unambiguous uppercase DNA"""
-    return seq.translate(_dna_trans)[::-1]
+    See Also
+    --------
+    RNA.translate
+    DNA.translate
+    GeneticCode.from_ncbi
 
+    Notes
+    -----
+    The genetic codes available via ``GeneticCode.from_ncbi`` and used
+    throughout the examples are defined in [1]_. The genetic code strings
+    defined there are directly compatible with the ``GeneticCode`` constructor.
 
-class GeneticCode(SkbioObject):
+    The order of `amino_acids` and `starts` should correspond to NCBI's codon
+    order, defined in [1]_::
 
-    """Class to hold codon to amino acid mapping, and vice versa.
+        UUUUUUUUUUUUUUUUCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
+        UUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGG
+        UCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAG
 
-    Attributes
-    ----------
-    code_sequence
-    id
-    name
-    start_codon_sequence
-    start_codons
-    codons
-    synonyms
-    sense_codons
-    anticodons
-    blocks
+    Note that scikit-bio displays this ordering using the IUPAC RNA alphabet,
+    while NCBI displays this same ordering using the IUPAC DNA alphabet (for
+    historical purposes).
 
-    Parameters
+    References
     ----------
-    code_sequence : str
-        64-character string containing NCBI representation.
-    id : str, optional
-        identifier for the object.
-    name : str, optional
-        name for the object.
-    start_codon_sequence : str, optional
-        starting point for the codon sequence.
-
-    Returns
-    -------
-    GeneticCode
-        initialized ``GeneticCode`` object.
-
-    Raises
-    ------
-    GeneticCodeInitError
-        If the length of `code_sequence` is different to `64`.
-
-    Methods
-    -------
-    changes
-    get_stop_indices
-    is_start
-    is_stop
-    translate_six_frames
-    translate
-    __repr__
-    __getitem__
-    __str__
-    __eq__
+    .. [1] http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi
 
     Examples
     --------
-    >>> from skbio.sequence import GeneticCode
-    >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSR'
-    ...                   'RVVVVAAAADDEEGGGG')
-
-    .. note:: `*` is used to denote termination as per the NCBI standard.
-        Although the genetic code objects convert DNA to RNA and vice versa,
-        lists of codons that they produce will be provided in DNA format.
+    Get NCBI's standard genetic code (table ID 1, the default genetic code
+    in scikit-bio):
+
+    >>> from skbio import GeneticCode
+    >>> GeneticCode.from_ncbi()
+    GeneticCode (Standard)
+    -------------------------------------------------------------------------
+      AAs  = FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG
+    Starts = ---M---------------M---------------M----------------------------
+    Base1  = UUUUUUUUUUUUUUUUCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
+    Base2  = UUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGG
+    Base3  = UCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAG
+
+    Get a different NCBI genetic code (25):
+
+    >>> GeneticCode.from_ncbi(25)
+    GeneticCode (Candidate Division SR1 and Gracilibacteria)
+    -------------------------------------------------------------------------
+      AAs  = FFLLSSSSYY**CCGWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG
+    Starts = ---M-------------------------------M---------------M------------
+    Base1  = UUUUUUUUUUUUUUUUCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
+    Base2  = UUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGG
+    Base3  = UCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAG
+
+    Define a custom genetic code:
+
+    >>> GeneticCode('M' * 64, '-' * 64)
+    GeneticCode
+    -------------------------------------------------------------------------
+      AAs  = MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
+    Starts = ----------------------------------------------------------------
+    Base1  = UUUUUUUUUUUUUUUUCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
+    Base2  = UUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGG
+    Base3  = UCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAG
+
+    Translate an RNA sequence to protein using NCBI's standard genetic code:
+
+    >>> from skbio import RNA
+    >>> rna = RNA('AUGCCACUUUAA')
+    >>> GeneticCode.from_ncbi().translate(rna)
+    Protein
+    -----------------------------
+    Stats:
+        length: 4
+        has gaps: False
+        has degenerates: False
+        has non-degenerates: True
+        has stops: True
+    -----------------------------
+    0 MPL*
 
     """
-    # class data: need the bases, the list of codons in UUU -> GGG order, and
-    # a mapping from positions in the list back to codons. These should be the
-    # same for all GeneticCode instances, and are immutable (therefore
-    # private).
-    _codons = [a + b + c for a in "TCAG" for b in "TCAG" for c in "TCAG"]
-
-    def __init__(self, code_sequence, id=None, name=None,
-                 start_codon_sequence=None):
-        if len(code_sequence) != 64:
-            raise GeneticCodeInitError("code_sequence: %s has length %d, but "
-                                       "expected 64" % (code_sequence,
-                                                        len(code_sequence)))
-
-        self.code_sequence = code_sequence
-        self.id = id
-        self.name = name
-        self.start_codon_sequence = start_codon_sequence
-        start_codons = {}
-        if start_codon_sequence is not None:
-            for codon, aa in zip(self._codons, start_codon_sequence):
-                if aa != '-':
-                    start_codons[codon] = aa
-        self.start_codons = start_codons
-        codon_lookup = {key: value for (key, value) in zip(self._codons,
-                                                           code_sequence)}
-        self.codons = codon_lookup
-
-        # create synonyms for each aa
-        aa_lookup = defaultdict(list)
-        for codon in self._codons:
-            aa = codon_lookup[codon]
-            aa_lookup[aa].append(codon)
-        self.synonyms = dict(aa_lookup)
-        sense_codons = codon_lookup.copy()
-
-        # create sense codons
-        stop_codons = self['*']
-        for c in stop_codons:
-            del sense_codons[c]
-        self.sense_codons = sense_codons
-
-        # create anticodons
-        ac = {}
-        for aa, codons in self.synonyms.items():
-            ac[aa] = [_simple_rc(element) for element in codons]
-        self.anticodons = ac
-
-    def _analyze_quartet(self, codons, aa):
-        """Analyzes a quartet of codons and amino acids: returns list of lists.
-
-        Each list contains one block, splitting at purine/pyrimidine boundary
-        if necessary.
-
-        codons should be a list of 4 codons.
-        aa should be a list of 4 amino acid symbols.
-
-        Possible states:
-            - All amino acids are the same: returns list of one quartet.
-            - Two groups of 2 aa: returns list of two doublets.
-            - One group of 2 and 2 groups of 1: list of one doublet, 2 singles.
-            - 4 groups of 1: four singles.
-
-        Note: codon blocks like Ile in the standard code (AUU, AUC, AUA) will
-        be split when they cross the R/Y boundary, so [[AUU, AUC], [AUA]]. This
-        would also apply to a block like AUC AUA AUG -> [[AUC],[AUA,AUG]],
-        although this latter pattern is not observed in the standard code.
-        """
-        if aa[0] == aa[1]:
-            first_doublet = True
-        else:
-            first_doublet = False
-        if aa[2] == aa[3]:
-            second_doublet = True
-        else:
-            second_doublet = False
-        if first_doublet and second_doublet and aa[1] == aa[2]:
-            return [codons]
-        else:
-            blocks = []
-            if first_doublet:
-                blocks.append(codons[:2])
-            else:
-                blocks.extend([[codons[0]], [codons[1]]])
-            if second_doublet:
-                blocks.append(codons[2:])
-            else:
-                blocks.extend([[codons[2]], [codons[3]]])
-            return blocks
-
-    def _get_blocks(self):
-        """Returns list of lists of codon blocks in the genetic code.
-
-        A codon block can be:
-            - a quartet, if all 4 XYn codons have the same amino acid.
-            - a doublet, if XYt and XYc or XYa and XYg have the same aa.
-            - a singlet, otherwise.
+    _num_codons = 64
+    _radix_multiplier = np.asarray([16, 4, 1], dtype=np.uint8)
+    _start_stop_options = ['ignore', 'optional', 'require']
+    __offset_table = None
+
+    @classproperty
+    def _offset_table(cls):
+        if cls.__offset_table is None:
+            # create lookup table that is filled with 255 everywhere except for
+            # indices corresponding to U, C, A, and G. 255 was chosen to
+            # represent invalid character offsets because it will create an
+            # invalid (out of bounds) index into `amino_acids` which should
+            # error noisily. this is important in case the valid nondegenerate
+            # IUPAC RNA characters change in the future and the assumptions
+            # currently made by the code become invalid
+            table = np.empty(ord(b'U') + 1, dtype=np.uint8)
+            table.fill(255)
+            table[ord(b'U')] = 0
+            table[ord(b'C')] = 1
+            table[ord(b'A')] = 2
+            table[ord(b'G')] = 3
+            cls.__offset_table = table
+        return cls.__offset_table
+
+    @classmethod
+    @stable(as_of="0.4.0")
+    def from_ncbi(cls, table_id=1):
+        """Return NCBI genetic code specified by table ID.
+
+        Parameters
+        ----------
+        table_id : int, optional
+            Table ID of the NCBI genetic code to return.
 
         Returns
         -------
-        list
-            Returns a list of the quartets, doublets, and singlets in the order
-            UUU -> GGG.
+        GeneticCode
+            NCBI genetic code specified by `table_id`.
 
         Notes
         -----
-        A doublet cannot span the purine/pyrimidine boundary, and a quartet
-        cannot span the boundary between two codon blocks whose first two bases
-        differ.
+        The table IDs and genetic codes available in this method and used
+        throughout the examples are defined in [1]_.
 
-        """
-        if hasattr(self, '_blocks'):
-            return self._blocks
-        else:
-            blocks = []
-            curr_codons = []
-            curr_aa = []
-            for index, codon, aa in zip(range(64), self._codons,
-                                        self.code_sequence):
-                # we're in a new block if it's a new quartet or a different aa
-                new_quartet = not index % 4
-                if new_quartet and curr_codons:
-                    blocks.extend(self._analyze_quartet(curr_codons, curr_aa))
-                    curr_codons = []
-                    curr_aa = []
-                curr_codons.append(codon)
-                curr_aa.append(aa)
-            # don't forget to append last block
-            if curr_codons:
-                blocks.extend(self._analyze_quartet(curr_codons, curr_aa))
-            self._blocks = blocks
-            return self._blocks
-
-    blocks = property(_get_blocks)
+        References
+        ----------
+        .. [1] http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi
 
-    def __str__(self):
-        """Returns code_sequence that constructs the GeneticCode
+        Examples
+        --------
+        Get the NCBI thraustochytrium mitochondrial genetic code (23):
+
+        >>> tmgc = GeneticCode.from_ncbi(23)
+        >>> tmgc.name
+        'Thraustochytrium Mitochondrial'
 
-        .. shownumpydoc
         """
-        return self.code_sequence
+        if table_id not in _ncbi_genetic_codes:
+            raise ValueError(
+                "`table_id` must be one of %r, not %r"
+                % (sorted(_ncbi_genetic_codes), table_id))
+        return _ncbi_genetic_codes[table_id]
 
-    def __repr__(self):
-        """Returns reconstructable representation of the GeneticCode
+    @classproperty
+    @stable(as_of="0.4.0")
+    def reading_frames(cls):
+        """Six possible reading frames.
 
-        .. shownumpydoc
-        """
-        return 'GeneticCode(%s)' % str(self)
+        Reading frames are ordered:
 
-    def __eq__(self, other):
-        """ Allows two GeneticCode objects to be compared to each other.
+        * 1 (forward)
+        * 2 (forward)
+        * 3 (forward)
+        * -1 (reverse)
+        * -2 (reverse)
+        * -3 (reverse)
 
-        Two GeneticCode objects are equal if they have equal code_sequences.
+        This property can be passed into
+        ``GeneticCode.translate(reading_frame)``.
+
+        Returns
+        -------
+        list (int)
+            Six possible reading frames.
 
-        .. shownumpydoc
         """
-        if not isinstance(other, GeneticCode):
-            return False
-        return self.code_sequence == other.code_sequence
+        return [1, 2, 3, -1, -2, -3]
 
-    def __ne__(self, other):
-        """Required in Py2."""
-        return not self == other
+    @property
+    @stable(as_of="0.4.0")
+    def name(self):
+        """Genetic code name.
 
-    def __getitem__(self, item):
-        """Returns amino acid corresponding to codon, or codons for an aa.
+        This is simply metadata and does not affect the functionality of the
+        genetic code itself.
 
-        Returns [] for empty list of codons, 'X' for unknown amino acid.
+        Returns
+        -------
+        str
+            Genetic code name.
 
-        .. shownumpydoc
         """
-        item = str(item)
-        if len(item) == 1:  # amino acid
-            return self.synonyms.get(item, [])
-        elif len(item) == 3:  # codon
-            key = item.upper()
-            key = key.replace('U', 'T')
-            return self.codons.get(key, 'X')
-        else:
-            raise InvalidCodonError("Codon or aa %s has wrong length" % item)
-
-    def translate(self, nucleotide_sequence, start=0):
-        """Translate nucleotide to protein sequence
-
-        Parameters
-        ----------
-        nucleotide_sequence : NucleotideSequence
-            sequence to be translated
-        start : int, optional
-            position to begin translation
+        return self._name
+
+    @stable(as_of="0.4.0")
+    def __init__(self, amino_acids, starts, name=''):
+        self._set_amino_acids(amino_acids)
+        self._set_starts(starts)
+        self._name = name
+
+    def _set_amino_acids(self, amino_acids):
+        amino_acids = Protein(amino_acids)
+
+        if len(amino_acids) != self._num_codons:
+            raise ValueError("`amino_acids` must be length %d, not %d"
+                             % (self._num_codons, len(amino_acids)))
+        indices = (amino_acids.values == b'M').nonzero()[0]
+        if indices.size < 1:
+            raise ValueError("`amino_acids` must contain at least one M "
+                             "(methionine) character")
+        self._amino_acids = amino_acids
+        self._m_character_codon = self._index_to_codon(indices[0])
+
+    def _set_starts(self, starts):
+        starts = Protein(starts)
+
+        if len(starts) != self._num_codons:
+            raise ValueError("`starts` must be length %d, not %d"
+                             % (self._num_codons, len(starts)))
+        if ((starts.values == b'M').sum() + (starts.values == b'-').sum() !=
+                len(starts)):
+            # to prevent the user from accidentally swapping `starts` and
+            # `amino_acids` and getting a translation back
+            raise ValueError("`starts` may only contain M and - characters")
+
+        self._starts = starts
+
+        indices = (self._starts.values == b'M').nonzero()[0]
+        codons = np.empty((indices.size, 3), dtype=np.uint8)
+        for i, index in enumerate(indices):
+            codons[i] = self._index_to_codon(index)
+        self._start_codons = codons
+
+    def _index_to_codon(self, index):
+        """Convert AA index (0-63) to codon encoded in offsets (0-3)."""
+        codon = np.empty(3, dtype=np.uint8)
+        for i, multiplier in enumerate(self._radix_multiplier):
+            offset, index = divmod(index, multiplier)
+            codon[i] = offset
+        return codon
+
+    @stable(as_of="0.4.0")
+    def __str__(self):
+        """Return string representation of the genetic code.
 
         Returns
         -------
-        ProteinSequence
-            translation of nucleotide_sequence
+        str
+            Genetic code in NCBI genetic code format.
 
         Notes
         -----
-        ``translate`` returns the translation of the entire sequence, (i.e., of
-        ``nucleotide_sequence[start:]``). It is the user's responsibility to
-        trim to an open reading frame, either from the input or using the
-        output, if that is desired.
+        Representation uses NCBI genetic code format defined in [1]_.
 
-        See Also
-        --------
-        translate_six_frames
-
-        Examples
-        --------
-        >>> from skbio.sequence import GeneticCode
-        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
-        ...                   'RRVVVVAAAADDEEGGGG')
-        >>> sgc.translate('AUGCAUGACUUUUGA', 1)
-        <ProteinSequence: CMTF (length: 4)>
+        References
+        ----------
+        .. [1] http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi
 
         """
-        if len(nucleotide_sequence) == 0:
-            return Protein('')
-        if start + 1 > len(nucleotide_sequence):
-            raise ValueError("Translation starts after end of"
-                             "NucleotideSequence")
+        return self._build_repr(include_name=False)
 
-        translation = []
-        for i in range(start, len(nucleotide_sequence) - 2, 3):
-            translation.append(self[nucleotide_sequence[i:i + 3]])
-        translation = Protein(''.join(translation))
-
-        return translation
-
-    def get_stop_indices(self, nucleotide_sequence, start=0):
-        """returns indexes for stop codons in the specified frame
-
-        Parameters
-        ----------
-        nucleotide_sequence : str, NucleotideSequence
-            sequence to be scanned for stop codons
-        start : int, optional
-            position where the search begins.
+    @stable(as_of="0.4.0")
+    def __repr__(self):
+        """Return string representation of the genetic code.
 
         Returns
         -------
-        list
-            indices of the stop codons.
+        str
+            Genetic code in NCBI genetic code format.
 
-        Examples
-        --------
-        >>> from skbio.sequence import GeneticCode, DNA
-        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
-        ...                   'RRVVVVAAAADDEEGGGG')
-        >>> seq = DNA('ATGCTAACATAAA')
-        >>> sgc.get_stop_indices(seq, 0)
-        [9]
+        Notes
+        -----
+        Representation uses NCBI genetic code format defined in [1]_ preceded
+        by a header. If the genetic code has a name, it will be included in the
+        header.
+
+        References
+        ----------
+        .. [1] http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi
 
         """
-        stops = self['*']
-        stop_pattern = '(%s)' % '|'.join(stops)
-        stop_pattern = re.compile(stop_pattern)
-        seq = str(nucleotide_sequence)
-        found = [hit.start() for hit in stop_pattern.finditer(seq)]
-        found = [index for index in found if index % 3 == start]
-        return found
+        return self._build_repr(include_name=True)
+
+    def _build_repr(self, include_name):
+        lines = ElasticLines()
+
+        if include_name:
+            name_line = self.__class__.__name__
+            if len(self.name) > 0:
+                name_line += ' (%s)' % self.name
+            lines.add_line(name_line)
+            lines.add_separator()
+
+        lines.add_line('  AAs  = %s' % str(self._amino_acids))
+        lines.add_line('Starts = %s' % str(self._starts))
+        base1 = 'U' * 16 + 'C' * 16 + 'A' * 16 + 'G' * 16
+        lines.add_line('Base1  = %s' % base1)
+        base2 = ('U' * 4 + 'C' * 4 + 'A' * 4 + 'G' * 4) * 4
+        lines.add_line('Base2  = %s' % base2)
+        base3 = 'UCAG' * 16
+        lines.add_line('Base3  = %s' % base3)
+
+        return lines.to_str()
+
+    @stable(as_of="0.4.0")
+    def __eq__(self, other):
+        """Determine if the genetic code is equal to another.
 
-    def translate_six_frames(self, nucleotide_sequence):
-        """Translate nucleotide to protein sequences for all six reading frames
+        Genetic codes are equal if they are *exactly* the same type and
+        defined by the same `amino_acids` and `starts`. A genetic code's name
+        (accessed via ``name`` property) does not affect equality.
 
         Parameters
         ----------
-        nucleotide_sequence : NucleotideSequence
-            sequence to be translated
+        other : GeneticCode
+            Genetic code to test for equality against.
 
         Returns
         -------
-        list
-            the six translated ProteinSequence objects
-
-        See Also
-        --------
-        translate
+        bool
+            Indicates whether the genetic code is equal to `other`.
 
         Examples
         --------
-        >>> from skbio.sequence import GeneticCode, RNA
-        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
-        ...                   'RRVVVVAAAADDEEGGGG')
-        >>> results = sgc.translate_six_frames(RNA('AUGCUAACAUAAA'))
-        >>> for e in results: e
-        <ProteinSequence: MLT* (length: 4)>
-        <ProteinSequence: C*HK (length: 4)>
-        <ProteinSequence: ANI (length: 3)>
-        <ProteinSequence: FMLA (length: 4)>
-        <ProteinSequence: LC*H (length: 4)>
-        <ProteinSequence: YVS (length: 3)>
+        NCBI genetic codes 1 and 2 are not equal:
 
-        """
-        rc_nucleotide_sequence = nucleotide_sequence.rc()
-        results = []
-        for start in range(3):
-            translation = self.translate(nucleotide_sequence, start)
-            results.append(translation)
+        >>> GeneticCode.from_ncbi(1) == GeneticCode.from_ncbi(2)
+        False
 
-        for start in range(3):
-            translation = self.translate(rc_nucleotide_sequence, start)
-            results.append(translation)
+        Define a custom genetic code:
 
-        return results
+        >>> gc = GeneticCode('M' * 64, '-' * 64)
 
-    def is_start(self, codon):
-        """Checks if codon is a start codon
+        Define a second genetic code with the same `amino_acids` and `starts`.
+        Note that the presence of a name does not make the genetic codes
+        unequal:
+
+        >>> named_gc = GeneticCode('M' * 64, '-' * 64, name='example name')
+        >>> gc == named_gc
+        True
+
+        """
+        if self.__class__ != other.__class__:
+            return False
+        # convert Protein to str so that metadata is ignored in comparison. we
+        # only care about the sequence data defining the genetic code
+        if str(self._amino_acids) != str(other._amino_acids):
+            return False
+        if str(self._starts) != str(other._starts):
+            return False
+        return True
+
+    @stable(as_of="0.4.0")
+    def __ne__(self, other):
+        """Determine if the genetic code is not equal to another.
+
+        Genetic codes are not equal if their type, `amino_acids`, or `starts`
+        differ. A genetic code's name (accessed via ``name`` property) does not
+        affect equality.
 
         Parameters
         ----------
-        codon : str
-            codon string
+        other : GeneticCode
+            Genetic code to test for inequality against.
 
         Returns
         -------
         bool
-            ``True`` if codon is a start codon, ``False`` otherwise
-
-        Examples
-        --------
-        >>> from skbio.sequence import GeneticCode
-        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
-        ...                   'RRVVVVAAAADDEEGGGG')
-        >>> sgc.is_start('ATG')
-        False
-        >>> sgc.is_start('AAA')
-        False
+            Indicates whether the genetic code is not equal to `other`.
 
         """
-        fixed_codon = codon.upper().replace('U', 'T')
-        return fixed_codon in self.start_codons
+        return not (self == other)
 
-    def is_stop(self, codon):
-        """Checks if codon is a stop codon
+    @stable(as_of="0.4.0")
+    def translate(self, sequence, reading_frame=1, start='ignore',
+                  stop='ignore'):
+        """Translate RNA sequence into protein sequence.
 
         Parameters
         ----------
-        codon : str
-            codon string
+        sequence : RNA
+            RNA sequence to translate.
+        reading_frame : {1, 2, 3, -1, -2, -3}
+            Reading frame to use in translation. 1, 2, and 3 are forward frames
+            and -1, -2, and -3 are reverse frames. If reverse (negative), will
+            reverse complement the sequence before translation.
+        start : {'ignore', 'require', 'optional'}
+            How to handle start codons:
+
+            * "ignore": translation will start from the beginning of the
+              reading frame, regardless of the presence of a start codon.
+
+            * "require": translation will start at the first start codon in
+              the reading frame, ignoring all prior positions. The first amino
+              acid in the translated sequence will *always* be methionine
+              (M character), even if an alternative start codon was used in
+              translation. This behavior most closely matches the underlying
+              biology since fMet doesn't have a corresponding IUPAC character.
+              If a start codon does not exist, a ``ValueError`` is raised.
+
+            * "optional": if a start codon exists in the reading frame, matches
+              the behavior of "require". If a start codon does not exist,
+              matches the behavior of "ignore".
+
+        stop : {'ignore', 'require', 'optional'}
+            How to handle stop codons:
+
+            * "ignore": translation will ignore the presence of stop codons and
+              translate to the end of the reading frame.
+
+            * "require": translation will terminate at the first stop codon.
+              The stop codon will not be included in the translated sequence.
+              If a stop codon does not exist, a ``ValueError`` is raised.
+
+            * "optional": if a stop codon exists in the reading frame, matches
+              the behavior of "require". If a stop codon does not exist,
+              matches the behavior of "ignore".
 
         Returns
         -------
-        bool
-            ``True`` if codon is a stop codon, ``False`` otherwise
+        Protein
+            Translated sequence.
+
+        See Also
+        --------
+        translate_six_frames
+
+        Notes
+        -----
+        Input RNA sequence metadata are included in the translated protein
+        sequence. Positional metadata are not included.
 
         Examples
         --------
-        >>> from skbio.sequence import GeneticCode
-        >>> sgc = GeneticCode('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSS'
-        ...                   'RRVVVVAAAADDEEGGGG')
-        >>> sgc.is_stop('UAA')
-        True
-        >>> sgc.is_stop('AAA')
-        False
+        Translate RNA into protein using NCBI's standard genetic code (table ID
+        1, the default genetic code in scikit-bio):
+
+        >>> from skbio import RNA, GeneticCode
+        >>> rna = RNA('AGUAUUCUGCCACUGUAAGAA')
+        >>> sgc = GeneticCode.from_ncbi()
+        >>> sgc.translate(rna)
+        Protein
+        -----------------------------
+        Stats:
+            length: 7
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            has stops: True
+        -----------------------------
+        0 SILPL*E
+
+        In this command, we used the default ``start`` behavior, which starts
+        translation at the beginning of the reading frame, regardless of the
+        presence of a start codon. If we specify "require", translation will
+        start at the first start codon in the reading frame (in this example,
+        CUG), ignoring all prior positions:
+
+        >>> sgc.translate(rna, start='require')
+        Protein
+        -----------------------------
+        Stats:
+            length: 5
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            has stops: True
+        -----------------------------
+        0 MPL*E
+
+        Note that the codon coding for L (CUG) is an alternative start codon in
+        this genetic code. Since we specified "require" mode, methionine (M)
+        was used in place of the alternative start codon (L). This behavior
+        most closely matches the underlying biology since fMet doesn't have a
+        corresponding IUPAC character.
+
+        Translate the same RNA sequence, also specifying that translation
+        terminate at the first stop codon in the reading frame:
+
+        >>> sgc.translate(rna, start='require', stop='require')
+        Protein
+        -----------------------------
+        Stats:
+            length: 3
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            has stops: False
+        -----------------------------
+        0 MPL
+
+        Passing "require" to both ``start`` and ``stop`` trims the translation
+        to the CDS (and in fact requires that one is present in the reading
+        frame). Changing the reading frame to 2 causes an exception to be
+        raised because a start codon doesn't exist in the reading frame:
+
+        >>> sgc.translate(rna, start='require', stop='require',
+        ...               reading_frame=2) # doctest: +IGNORE_EXCEPTION_DETAIL
+        Traceback (most recent call last):
+            ...
+        ValueError: ...
 
         """
-        return self[codon] == '*'
-
-    def changes(self, other):
-        """Returns dictionary of codons that differ
+        self._validate_translate_inputs(sequence, reading_frame, start, stop)
+
+        offset = abs(reading_frame) - 1
+        if reading_frame < 0:
+            sequence = sequence.reverse_complement()
+
+        # Translation strategy:
+        #
+        #   1. Obtain view of underlying sequence bytes from the beginning of
+        #      the reading frame.
+        #   2. Convert bytes to offsets (0-3, base 4 since there are only 4
+        #      characters allowed: UCAG).
+        #   3. Reshape byte vector into (N, 3), where N is the number of codons
+        #      in the reading frame. Each row represents a codon in the
+        #      sequence.
+        #   4. (Optional) Find start codon in the reading frame and trim to
+        #      this position. Replace start codon with M codon.
+        #   5. Convert each codon (encoded as offsets) into an index
+        #      corresponding to an amino acid (0-63).
+        #   6. Obtain translated sequence by indexing into the amino acids
+        #      vector (`amino_acids`) using the indices defined in step 5.
+        #   7. (Optional) Find first stop codon and trim to this position.
+        data = sequence.values[offset:].view(np.uint8)
+        # since advanced indexing is used with an integer ndarray, a copy is
+        # always returned. thus, the in-place modification made below
+        # (replacing the start codon) is safe.
+        data = self._offset_table[data]
+        data = data[:data.size // 3 * 3].reshape((-1, 3))
+
+        if start in {'require', 'optional'}:
+            start_codon_index = data.shape[0]
+            for start_codon in self._start_codons:
+                indices = np.all(data == start_codon, axis=1).nonzero()[0]
+
+                if indices.size > 0:
+                    first_index = indices[0]
+                    if first_index < start_codon_index:
+                        start_codon_index = first_index
+
+            if start_codon_index != data.shape[0]:
+                data = data[start_codon_index:]
+                data[0] = self._m_character_codon
+            elif start == 'require':
+                self._raise_require_error('start', reading_frame)
+
+        indices = (data * self._radix_multiplier).sum(axis=1)
+        translated = self._amino_acids.values[indices]
+
+        if stop in {'require', 'optional'}:
+            stop_codon_indices = (translated == b'*').nonzero()[0]
+            if stop_codon_indices.size > 0:
+                translated = translated[:stop_codon_indices[0]]
+            elif stop == 'require':
+                self._raise_require_error('stop', reading_frame)
+
+        metadata = None
+        if sequence.has_metadata():
+            metadata = sequence.metadata
+
+        # turn off validation because `translated` is guaranteed to be valid
+        return Protein(translated, metadata=metadata, validate=False)
+
+    def _validate_translate_inputs(self, sequence, reading_frame, start, stop):
+        if not isinstance(sequence, RNA):
+            raise TypeError("Sequence to translate must be RNA, not %s" %
+                            type(sequence).__name__)
+
+        if reading_frame not in self.reading_frames:
+            raise ValueError("`reading_frame` must be one of %r, not %r" %
+                             (self.reading_frames, reading_frame))
+
+        for name, value in ('start', start), ('stop', stop):
+            if value not in self._start_stop_options:
+                raise ValueError("`%s` must be one of %r, not %r" %
+                                 (name, self._start_stop_options, value))
+
+        if sequence.has_gaps():
+            raise ValueError("scikit-bio does not support translation of "
+                             "gapped sequences.")
+
+        if sequence.has_degenerates():
+            raise NotImplementedError("scikit-bio does not currently support "
+                                      "translation of degenerate sequences."
+                                      "`RNA.expand_degenerates` can be used "
+                                      "to obtain all non-degenerate versions "
+                                      "of a degenerate sequence.")
+
+    def _raise_require_error(self, name, reading_frame):
+        raise ValueError(
+            "Sequence does not contain a %s codon in the "
+            "current reading frame (`reading_frame=%d`). Presence "
+            "of a %s codon is required with `%s='require'`"
+            % (name, reading_frame, name, name))
+
+    @stable(as_of="0.4.0")
+    def translate_six_frames(self, sequence, start='ignore', stop='ignore'):
+        """Translate RNA into protein using six possible reading frames.
+
+        The six possible reading frames are:
+
+        * 1 (forward)
+        * 2 (forward)
+        * 3 (forward)
+        * -1 (reverse)
+        * -2 (reverse)
+        * -3 (reverse)
+
+        Translated sequences are yielded in this order.
 
         Parameters
         ----------
-        other : GeneticCode
-           genetic code object
+        sequence : RNA
+            RNA sequence to translate.
+        start : {'ignore', 'require', 'optional'}
+            How to handle start codons. See ``GeneticCode.translate`` for
+            details.
+        stop : {'ignore', 'require', 'optional'}
+            How to handle stop codons. See ``GeneticCode.translate`` for
+            details.
+
+        Yields
+        ------
+        Protein
+            Translated sequence in the current reading frame.
 
-        Returns
-        -------
-        dict
-            Returns a dictionary of the form ``{codon:'XY'}`` for codons that
-            differ. X is the string representation of the amino acid in the
-            object calling this method, Y is the string representation of the
-            amino acid in `other`. Always returns a 2-character string.
+        See Also
+        --------
+        translate
+
+        Notes
+        -----
+        This method is faster than (and equivalent to) performing six
+        independent translations using, for example:
+
+        ``(gc.translate(seq, reading_frame=rf)
+        for rf in GeneticCode.reading_frames)``
+
+        Input RNA sequence metadata are included in each translated protein
+        sequence. Positional metadata are not included.
 
         Examples
         --------
-        >>> from skbio.sequence import GeneticCode
-        >>> from pprint import pprint
-        >>> sgc = GeneticCode('FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSS*'
-        ...                   '*VVVVAAAADDEEGGGG')
-        >>> pprint(sgc.changes('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTT'
-        ...                    'TNNKKSSRRVVVVAAAADDEEGGGG'))
-        {'AGA': '*R', 'AGG': '*R', 'ATA': 'MI', 'TGA': 'W*'}
+        Translate RNA into protein using the six possible reading frames and
+        NCBI's standard genetic code (table ID 1, the default genetic code in
+        scikit-bio):
+
+        >>> from skbio import RNA, GeneticCode
+        >>> rna = RNA('AUGCCACUUUAA')
+        >>> sgc = GeneticCode.from_ncbi()
+        >>> for protein in sgc.translate_six_frames(rna):
+        ...     protein
+        ...     print('')
+        Protein
+        -----------------------------
+        Stats:
+            length: 4
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            has stops: True
+        -----------------------------
+        0 MPL*
+        <BLANKLINE>
+        Protein
+        -----------------------------
+        Stats:
+            length: 3
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            has stops: False
+        -----------------------------
+        0 CHF
+        <BLANKLINE>
+        Protein
+        -----------------------------
+        Stats:
+            length: 3
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            has stops: False
+        -----------------------------
+        0 ATL
+        <BLANKLINE>
+        Protein
+        -----------------------------
+        Stats:
+            length: 4
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            has stops: False
+        -----------------------------
+        0 LKWH
+        <BLANKLINE>
+        Protein
+        -----------------------------
+        Stats:
+            length: 3
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            has stops: True
+        -----------------------------
+        0 *SG
+        <BLANKLINE>
+        Protein
+        -----------------------------
+        Stats:
+            length: 3
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            has stops: False
+        -----------------------------
+        0 KVA
+        <BLANKLINE>
 
         """
-        changes = {}
-        try:
-            other_code = other.code_sequence
-        except AttributeError:  # try using other directly as sequence
-            other_code = other
-        for codon, old, new in zip(self._codons, self.code_sequence,
-                                   other_code):
-            if old != new:
-                changes[codon] = old + new
-        return changes
-
-
-_ncbi_genetic_code_data = [
-    [
+        rc = sequence.reverse_complement()
+
+        for reading_frame in range(1, 4):
+            yield self.translate(sequence, reading_frame=reading_frame,
+                                 start=start, stop=stop)
+        for reading_frame in range(1, 4):
+            yield self.translate(rc, reading_frame=reading_frame,
+                                 start=start, stop=stop)
+
+
+# defined at http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi
+_ncbi_genetic_codes = {
+    1: GeneticCode(
         'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
-        1,
-        'Standard Nuclear',
         '---M---------------M---------------M----------------------------',
-    ],
-    [
+        'Standard'),
+    2: GeneticCode(
         'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSS**VVVVAAAADDEEGGGG',
-        2,
-        'Vertebrate Mitochondrial',
         '--------------------------------MMMM---------------M------------',
-    ],
-    [
+        'Vertebrate Mitochondrial'),
+    3: GeneticCode(
         'FFLLSSSSYY**CCWWTTTTPPPPHHQQRRRRIIMMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
-        3,
-        'Yeast Mitochondrial',
         '----------------------------------MM----------------------------',
-    ],
-    [
+        'Yeast Mitochondrial'),
+    4: GeneticCode(
         'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
-        4,
-        'Mold, Protozoan, and Coelenterate Mitochondrial, and Mycoplasma/'
-        'Spiroplasma Nuclear',
         '--MM---------------M------------MMMM---------------M------------',
-    ],
-    [
+        'Mold, Protozoan, and Coelenterate Mitochondrial, and '
+        'Mycoplasma/Spiroplasma'),
+    5: GeneticCode(
         'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSSSVVVVAAAADDEEGGGG',
-        5,
-        'Invertebrate Mitochondrial',
         '---M----------------------------MMMM---------------M------------',
-    ],
-    [
+        'Invertebrate Mitochondrial'),
+    6: GeneticCode(
         'FFLLSSSSYYQQCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
-        6,
-        'Ciliate, Dasycladacean and Hexamita Nuclear',
         '-----------------------------------M----------------------------',
-    ],
-    [
+        'Ciliate, Dasycladacean and Hexamita Nuclear'),
+    9: GeneticCode(
         'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG',
-        9,
-        'Echinoderm and Flatworm Mitochondrial',
         '-----------------------------------M---------------M------------',
-    ],
-    [
+        'Echinoderm and Flatworm Mitochondrial'),
+    10: GeneticCode(
         'FFLLSSSSYY**CCCWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
-        10,
-        'Euplotid Nuclear',
         '-----------------------------------M----------------------------',
-    ],
-    [
+        'Euplotid Nuclear'),
+    11: GeneticCode(
         'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
-        11,
-        'Bacterial Nuclear and Plant Plastid',
         '---M---------------M------------MMMM---------------M------------',
-    ],
-    [
+        'Bacterial, Archaeal and Plant Plastid'),
+    12: GeneticCode(
         'FFLLSSSSYY**CC*WLLLSPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
-        12,
-        'Alternative Yeast Nuclear',
         '-------------------M---------------M----------------------------',
-    ],
-    [
+        'Alternative Yeast Nuclear'),
+    13: GeneticCode(
         'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSGGVVVVAAAADDEEGGGG',
-        13,
-        'Ascidian Mitochondrial',
-        '-----------------------------------M----------------------------',
-    ],
-    [
+        '---M------------------------------MM---------------M------------',
+        'Ascidian Mitochondrial'),
+    14: GeneticCode(
         'FFLLSSSSYYY*CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG',
-        14,
-        'Alternative Flatworm Mitochondrial',
-        '-----------------------------------M----------------------------',
-    ],
-    [
-        'FFLLSSSSYY*QCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
-        15,
-        'Blepharisma Nuclear',
         '-----------------------------------M----------------------------',
-    ],
-    [
+        'Alternative Flatworm Mitochondrial'),
+    16: GeneticCode(
         'FFLLSSSSYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
-        16,
-        'Chlorophycean Mitochondrial',
         '-----------------------------------M----------------------------',
-    ],
-    [
+        'Chlorophycean Mitochondrial'),
+    21: GeneticCode(
         'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNNKSSSSVVVVAAAADDEEGGGG',
-        20,
-        'Trematode Mitochondrial',
         '-----------------------------------M---------------M------------',
-    ],
-    [
+        'Trematode Mitochondrial'),
+    22: GeneticCode(
         'FFLLSS*SYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
-        22,
-        'Scenedesmus obliquus Mitochondrial',
         '-----------------------------------M----------------------------',
-    ],
-    [
+        'Scenedesmus obliquus Mitochondrial'),
+    23: GeneticCode(
         'FF*LSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
-        23,
-        'Thraustochytrium Mitochondrial',
-    ],
-]
-
-
-def genetic_code(*id):
-    """``skbio.sequence.GeneticCode`` factory given an optional id.
-
-    Parameters
-    ----------
-    id : int or str optional
-        Indicates the ``skbio.sequence.GeneticCode`` to return. Must be in the
-        range of [1, 23] inclusive. If `id` is not provided, the Standard
-        Nuclear genetic code will be returned.
-
-    Returns
-    -------
-    skbio.sequence.GeneticCode
-
-    """
-    key = 1
-    if len(id) == 1:
-        key = int(id[0])
-    if len(id) > 1:
-        raise TypeError('genetic_code takes 0 or 1 arguments (%d given)'
-                        % len(id))
-    for n in _ncbi_genetic_code_data:
-        if n[1] == key:
-            return GeneticCode(*n)
-
-    raise ValueError('Genetic code could not be found for %d.' % id)
+        '--------------------------------M--M---------------M------------',
+        'Thraustochytrium Mitochondrial'),
+    24: GeneticCode(
+        'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSSKVVVVAAAADDEEGGGG',
+        '---M---------------M---------------M---------------M------------',
+        'Pterobranchia Mitochondrial'),
+    25: GeneticCode(
+        'FFLLSSSSYY**CCGWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
+        '---M-------------------------------M---------------M------------',
+        'Candidate Division SR1 and Gracilibacteria')
+}
diff --git a/skbio/sequence/_iupac_sequence.py b/skbio/sequence/_iupac_sequence.py
new file mode 100644
index 0000000..e777936
--- /dev/null
+++ b/skbio/sequence/_iupac_sequence.py
@@ -0,0 +1,601 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.utils import with_metaclass
+
+from abc import ABCMeta, abstractproperty
+from itertools import product
+
+import numpy as np
+from six import string_types
+
+from skbio.util._decorator import classproperty, overrides
+from skbio.util._misc import MiniRegistry
+from ._sequence import Sequence
+
+
+class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
+    """Store biological sequence data conforming to the IUPAC character set.
+
+    This is an abstract base class (ABC) that cannot be instantiated.
+
+    Attributes
+    ----------
+    values
+    metadata
+    positional_metadata
+    alphabet
+    gap_chars
+    nondegenerate_chars
+    degenerate_chars
+    degenerate_map
+
+    Raises
+    ------
+    ValueError
+        If sequence characters are not in the IUPAC character set [1]_.
+
+    See Also
+    --------
+    DNA
+    RNA
+    Protein
+
+    References
+    ----------
+    .. [1] Nomenclature for incompletely specified bases in nucleic acid
+       sequences: recommendations 1984.
+       Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
+       A Cornish-Bowden
+
+    """
+    # ASCII is built such that the difference between uppercase and lowercase
+    # is the 6th bit.
+    _ascii_invert_case_bit_offset = 32
+    _number_of_extended_ascii_codes = 256
+    _ascii_lowercase_boundary = 90
+    __validation_mask = None
+    __degenerate_codes = None
+    __nondegenerate_codes = None
+    __gap_codes = None
+
+    @classproperty
+    def _validation_mask(cls):
+        # TODO These masks could be defined (as literals) on each concrete
+        # object. For now, memoize!
+        if cls.__validation_mask is None:
+            cls.__validation_mask = np.invert(np.bincount(
+                np.fromstring(''.join(cls.alphabet), dtype=np.uint8),
+                minlength=cls._number_of_extended_ascii_codes).astype(bool))
+        return cls.__validation_mask
+
+    @classproperty
+    def _degenerate_codes(cls):
+        if cls.__degenerate_codes is None:
+            degens = cls.degenerate_chars
+            cls.__degenerate_codes = np.asarray([ord(d) for d in degens])
+        return cls.__degenerate_codes
+
+    @classproperty
+    def _nondegenerate_codes(cls):
+        if cls.__nondegenerate_codes is None:
+            nondegens = cls.nondegenerate_chars
+            cls.__nondegenerate_codes = np.asarray([ord(d) for d in nondegens])
+        return cls.__nondegenerate_codes
+
+    @classproperty
+    def _gap_codes(cls):
+        if cls.__gap_codes is None:
+            gaps = cls.gap_chars
+            cls.__gap_codes = np.asarray([ord(g) for g in gaps])
+        return cls.__gap_codes
+
+    @classproperty
+    def alphabet(cls):
+        """Return valid IUPAC characters.
+
+        This includes gap, non-degenerate, and degenerate characters.
+
+        Returns
+        -------
+        set
+            Valid IUPAC characters.
+
+        """
+        return cls.degenerate_chars | cls.nondegenerate_chars | cls.gap_chars
+
+    @classproperty
+    def gap_chars(cls):
+        """Return characters defined as gaps.
+
+        Returns
+        -------
+        set
+            Characters defined as gaps.
+
+        """
+        return set('-.')
+
+    @classproperty
+    def degenerate_chars(cls):
+        """Return degenerate IUPAC characters.
+
+        Returns
+        -------
+        set
+            Degenerate IUPAC characters.
+
+        """
+        return set(cls.degenerate_map)
+
+    @abstractproperty
+    @classproperty
+    def nondegenerate_chars(cls):
+        """Return non-degenerate IUPAC characters.
+
+        Returns
+        -------
+        set
+            Non-degenerate IUPAC characters.
+
+        """
+        return set()  # pragma: no cover
+
+    @abstractproperty
+    @classproperty
+    def degenerate_map(cls):
+        """Return mapping of degenerate to non-degenerate characters.
+
+        Returns
+        -------
+        dict (set)
+            Mapping of each degenerate IUPAC character to the set of
+            non-degenerate IUPAC characters it represents.
+
+        """
+        return set()  # pragma: no cover
+
+    @property
+    def _motifs(self):
+        return _motifs
+
+    @overrides(Sequence)
+    def __init__(self, sequence, metadata=None, positional_metadata=None,
+                 validate=True, lowercase=False):
+        super(IUPACSequence, self).__init__(
+            sequence, metadata, positional_metadata)
+
+        if lowercase is False:
+            pass
+        elif lowercase is True or isinstance(lowercase, string_types):
+            lowercase_mask = self._bytes > self._ascii_lowercase_boundary
+            self._convert_to_uppercase(lowercase_mask)
+
+            # If it isn't True, it must be a string_type
+            if not (lowercase is True):
+                self.positional_metadata[lowercase] = lowercase_mask
+        else:
+            raise TypeError("lowercase keyword argument expected a bool or "
+                            "string, but got %s" % type(lowercase))
+
+        if validate:
+            self._validate()
+
+    def _convert_to_uppercase(self, lowercase):
+        if np.any(lowercase):
+            with self._byte_ownership():
+                self._bytes[lowercase] ^= self._ascii_invert_case_bit_offset
+
+    def _validate(self):
+        # This is the fastest way that we have found to identify the
+        # presence or absence of certain characters (numbers).
+        # It works by multiplying a mask where the numbers which are
+        # permitted have a zero at their index, and all others have a one.
+        # The result is a vector which will propagate counts of invalid
+        # numbers and remove counts of valid numbers, so that we need only
+        # see if the array is empty to determine validity.
+        invalid_characters = np.bincount(
+            self._bytes, minlength=self._number_of_extended_ascii_codes
+        ) * self._validation_mask
+        if np.any(invalid_characters):
+            bad = list(np.where(
+                invalid_characters > 0)[0].astype(np.uint8).view('|S1'))
+            raise ValueError(
+                "Invalid character%s in sequence: %r. Valid IUPAC characters: "
+                "%r" % ('s' if len(bad) > 1 else '',
+                        [str(b.tostring().decode("ascii")) for b in bad] if
+                        len(bad) > 1 else bad[0],
+                        list(self.alphabet)))
+
+    def lowercase(self, lowercase):
+        """Return a case-sensitive string representation of the sequence.
+
+        Parameters
+        ----------
+        lowercase: str or boolean vector
+            If lowercase is a boolean vector, it is used to set sequence
+            characters to lowercase in the output string. True values in the
+            boolean vector correspond to lowercase characters. If lowercase
+            is a str, it is treated like a key into the positional metadata,
+            pointing to a column which must be a boolean vector.
+            That boolean vector is then used as described previously.
+
+        Returns
+        -------
+        str
+            String representation of sequence with specified characters set to
+            lowercase.
+
+        Examples
+        --------
+        >>> from skbio import DNA
+        >>> s = DNA('ACGT')
+        >>> s.lowercase([True, True, False, False])
+        'acGT'
+        >>> s = DNA('ACGT',
+        ...         positional_metadata={'exons': [True, False, False, True]})
+        >>> s.lowercase('exons')
+        'aCGt'
+
+        Constructor automatically populates a column in positional metadata
+        when the ``lowercase`` keyword argument is provided with a column name:
+
+        >>> s = DNA('ACgt', lowercase='introns')
+        >>> s.lowercase('introns')
+        'ACgt'
+        >>> s = DNA('ACGT', lowercase='introns')
+        >>> s.lowercase('introns')
+        'ACGT'
+
+        """
+        index = self._munge_to_index_array(lowercase)
+        outbytes = self._bytes.copy()
+        outbytes[index] ^= self._ascii_invert_case_bit_offset
+        return str(outbytes.tostring().decode('ascii'))
+
+    def gaps(self):
+        """Find positions containing gaps in the biological sequence.
+
+        Returns
+        -------
+        1D np.ndarray (bool)
+            Boolean vector where ``True`` indicates a gap character is present
+            at that position in the biological sequence.
+
+        See Also
+        --------
+        has_gaps
+
+        Examples
+        --------
+        >>> from skbio import DNA
+        >>> s = DNA('AC-G-')
+        >>> s.gaps()
+        array([False, False,  True, False,  True], dtype=bool)
+
+        """
+        return np.in1d(self._bytes, self._gap_codes)
+
+    def has_gaps(self):
+        """Determine if the sequence contains one or more gap characters.
+
+        Returns
+        -------
+        bool
+            Indicates whether there are one or more occurrences of gap
+            characters in the biological sequence.
+
+        Examples
+        --------
+        >>> from skbio import DNA
+        >>> s = DNA('ACACGACGTT')
+        >>> s.has_gaps()
+        False
+        >>> t = DNA('A.CAC--GACGTT')
+        >>> t.has_gaps()
+        True
+
+        """
+        # TODO use count, there aren't that many gap chars
+        # TODO: cache results
+        return bool(self.gaps().any())
+
+    def degenerates(self):
+        """Find positions containing degenerate characters in the sequence.
+
+        Returns
+        -------
+        1D np.ndarray (bool)
+            Boolean vector where ``True`` indicates a degenerate character is
+            present at that position in the biological sequence.
+
+        See Also
+        --------
+        has_degenerates
+        nondegenerates
+        has_nondegenerates
+
+        Examples
+        --------
+        >>> from skbio import DNA
+        >>> s = DNA('ACWGN')
+        >>> s.degenerates()
+        array([False, False,  True, False,  True], dtype=bool)
+
+        """
+        return np.in1d(self._bytes, self._degenerate_codes)
+
+    def has_degenerates(self):
+        """Determine if sequence contains one or more degenerate characters.
+
+        Returns
+        -------
+        bool
+            Indicates whether there are one or more occurrences of degenerate
+            characters in the biological sequence.
+
+        See Also
+        --------
+        degenerates
+        nondegenerates
+        has_nondegenerates
+
+        Examples
+        --------
+        >>> from skbio import DNA
+        >>> s = DNA('ACAC-GACGTT')
+        >>> s.has_degenerates()
+        False
+        >>> t = DNA('ANCACWWGACGTT')
+        >>> t.has_degenerates()
+        True
+
+        """
+        # TODO use bincount!
+        # TODO: cache results
+        return bool(self.degenerates().any())
+
+    def nondegenerates(self):
+        """Find positions containing non-degenerate characters in the sequence.
+
+        Returns
+        -------
+        1D np.ndarray (bool)
+            Boolean vector where ``True`` indicates a non-degenerate character
+            is present at that position in the biological sequence.
+
+        See Also
+        --------
+        has_nondegenerates
+        degenerates
+        has_degenerates
+
+        Examples
+        --------
+        >>> from skbio import DNA
+        >>> s = DNA('ACWGN')
+        >>> s.nondegenerates()
+        array([ True,  True, False,  True, False], dtype=bool)
+
+        """
+        return np.in1d(self._bytes, self._nondegenerate_codes)
+
+    def has_nondegenerates(self):
+        """Determine if sequence contains one or more non-degenerate characters
+
+        Returns
+        -------
+        bool
+            Indicates whether there are one or more occurrences of
+            non-degenerate characters in the biological sequence.
+
+        See Also
+        --------
+        nondegenerates
+        degenerates
+        has_degenerates
+
+        Examples
+        --------
+        >>> from skbio import DNA
+        >>> s = DNA('NWNNNNNN')
+        >>> s.has_nondegenerates()
+        False
+        >>> t = DNA('ANCACWWGACGTT')
+        >>> t.has_nondegenerates()
+        True
+
+        """
+        # TODO: cache results
+        return bool(self.nondegenerates().any())
+
+    def degap(self):
+        """Return a new sequence with gap characters removed.
+
+        Returns
+        -------
+        IUPACSequence
+            A new sequence with all gap characters removed.
+
+        See Also
+        --------
+        gap_chars
+
+        Notes
+        -----
+        The type and metadata of the result will be the same as the
+        biological sequence. If positional metadata is present, it will be
+        filtered in the same manner as the sequence characters and included in
+        the resulting degapped sequence.
+
+        Examples
+        --------
+        >>> from skbio import DNA
+        >>> s = DNA('GGTC-C--ATT-C.',
+        ...         positional_metadata={'quality':range(14)})
+        >>> s.degap()
+        DNA
+        -----------------------------
+        Positional metadata:
+            'quality': <dtype: int64>
+        Stats:
+            length: 9
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 55.56%
+        -----------------------------
+        0 GGTCCATTC
+
+        """
+        return self[np.invert(self.gaps())]
+
+    def expand_degenerates(self):
+        """Yield all possible non-degenerate versions of the sequence.
+
+        Yields
+        ------
+        IUPACSequence
+            Non-degenerate version of the sequence.
+
+        See Also
+        --------
+        degenerate_map
+
+        Notes
+        -----
+        There is no guaranteed ordering to the non-degenerate sequences that
+        are yielded.
+
+        Each non-degenerate sequence will have the same type, metadata,
+        and positional metadata as the biological sequence.
+
+        Examples
+        --------
+        >>> from skbio import DNA
+        >>> seq = DNA('TRG')
+        >>> seq_generator = seq.expand_degenerates()
+        >>> for s in sorted(seq_generator, key=str):
+        ...     s
+        ...     print('')
+        DNA
+        -----------------------------
+        Stats:
+            length: 3
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 33.33%
+        -----------------------------
+        0 TAG
+        <BLANKLINE>
+        DNA
+        -----------------------------
+        Stats:
+            length: 3
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 66.67%
+        -----------------------------
+        0 TGG
+        <BLANKLINE>
+
+        """
+        degen_chars = self.degenerate_map
+        nonexpansion_chars = self.nondegenerate_chars.union(self.gap_chars)
+
+        expansions = []
+        for char in self:
+            char = str(char)
+            if char in nonexpansion_chars:
+                expansions.append(char)
+            else:
+                expansions.append(degen_chars[char])
+
+        result = product(*expansions)
+        return (self._to(sequence=''.join(nondegen_seq)) for nondegen_seq in
+                result)
+
+    def find_motifs(self, motif_type, min_length=1, ignore=None):
+        """Search the biological sequence for motifs.
+
+        Options for `motif_type`:
+
+        Parameters
+        ----------
+        motif_type : str
+            Type of motif to find.
+        min_length : int, optional
+            Only motifs at least as long as `min_length` will be returned.
+        ignore : 1D array_like (bool), optional
+            Boolean vector indicating positions to ignore when matching.
+
+        Yields
+        ------
+        slice
+            Location of the motif in the biological sequence.
+
+        Raises
+        ------
+        ValueError
+            If an unknown `motif_type` is specified.
+
+        Examples
+        --------
+        >>> from skbio import DNA
+        >>> s = DNA('ACGGGGAGGCGGAG')
+        >>> for motif_slice in s.find_motifs('purine-run', min_length=2):
+        ...     motif_slice
+        ...     str(s[motif_slice])
+        slice(2, 9, None)
+        'GGGGAGG'
+        slice(10, 14, None)
+        'GGAG'
+
+        Gap characters can disrupt motifs:
+
+        >>> s = DNA('GG-GG')
+        >>> for motif_slice in s.find_motifs('purine-run'):
+        ...     motif_slice
+        slice(0, 2, None)
+        slice(3, 5, None)
+
+        Gaps can be ignored by passing the gap boolean vector to `ignore`:
+
+        >>> s = DNA('GG-GG')
+        >>> for motif_slice in s.find_motifs('purine-run', ignore=s.gaps()):
+        ...     motif_slice
+        slice(0, 5, None)
+
+        """
+        if motif_type not in self._motifs:
+            raise ValueError("Not a known motif (%r) for this sequence (%s)." %
+                             (motif_type, self.__class__.__name__))
+
+        return self._motifs[motif_type](self, min_length, ignore)
+
+    @overrides(Sequence)
+    def _constructor(self, **kwargs):
+        return self.__class__(validate=False, lowercase=False, **kwargs)
+
+    @overrides(Sequence)
+    def _repr_stats(self):
+        """Define custom statistics to display in the sequence's repr."""
+        stats = super(IUPACSequence, self)._repr_stats()
+        stats.append(('has gaps', '%r' % self.has_gaps()))
+        stats.append(('has degenerates', '%r' % self.has_degenerates()))
+        stats.append(('has non-degenerates', '%r' % self.has_nondegenerates()))
+        return stats
+
+
+_motifs = MiniRegistry()
+
+# Leave this at the bottom
+_motifs.interpolate(IUPACSequence, "find_motifs")
diff --git a/skbio/sequence/_nucleotide_mixin.py b/skbio/sequence/_nucleotide_mixin.py
new file mode 100644
index 0000000..6534c7e
--- /dev/null
+++ b/skbio/sequence/_nucleotide_mixin.py
@@ -0,0 +1,363 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.utils import with_metaclass
+
+from abc import ABCMeta, abstractproperty
+
+import numpy as np
+
+from skbio.util._decorator import classproperty
+from ._iupac_sequence import _motifs as parent_motifs
+
+
class NucleotideMixin(with_metaclass(ABCMeta, object)):
    """Mixin for adding functionality for working with sequences of nucleotides.

    This is an abstract base class (ABC) that cannot be instantiated.

    Attributes
    ----------
    complement_map

    See Also
    --------
    DNA
    RNA

    """
    # Lazily-built caches. Assigning through ``cls`` in the classproperties
    # below stores the value on the concrete subclass (e.g. DNA, RNA), so
    # each subclass keeps its own cache.
    __complement_lookup = None
    __gc_codes = None

    @classproperty
    def _complement_lookup(cls):
        # Byte-translation table: index is the extended-ASCII code of a
        # character, value is the code of its complement (0 for codes not
        # present in ``complement_map``). Built once per class.
        if cls.__complement_lookup is not None:
            return cls.__complement_lookup

        lookup = np.zeros(cls._number_of_extended_ascii_codes, dtype=np.uint8)
        for key, value in cls.complement_map.items():
            lookup[ord(key)] = ord(value)
        cls.__complement_lookup = lookup
        return lookup

    @classproperty
    def _gc_codes(cls):
        # ASCII codes of the characters counted toward GC content. ``S`` is
        # the IUPAC degenerate character meaning "G or C".
        if cls.__gc_codes is None:
            gc_iupac_chars = 'GCS'
            cls.__gc_codes = np.asarray([ord(g) for g in gc_iupac_chars])
        return cls.__gc_codes

    @property
    def _motifs(self):
        # Module-level motif registry shared by nucleotide sequences.
        return _motifs

    @abstractproperty
    @classproperty
    def complement_map(cls):
        """Return mapping of nucleotide characters to their complements.

        Returns
        -------
        dict
            Mapping of each character to its complement.

        Notes
        -----
        Complements cannot be defined for a generic nucleotide sequence because
        the complement of ``A`` is ambiguous. Thanks, nature...

        """
        # Placeholder only; concrete subclasses (e.g. RNA) override this
        # with a real dict.
        return set()  # pragma: no cover

    def complement(self, reverse=False):
        """Return the complement of the nucleotide sequence.

        Parameters
        ----------
        reverse : bool, optional
            If ``True``, return the reverse complement. If positional metadata
            is present, it will be reversed.

        Returns
        -------
        NucleotideMixin
            The (reverse) complement of the nucleotide sequence. The type and
            metadata of the result will be the same as the nucleotide
            sequence. If `reverse` is ``True``, positional metadata
            will be reversed if it is present.

        See Also
        --------
        reverse_complement
        complement_map

        Examples
        --------
        >>> from skbio import DNA
        >>> seq = DNA('TTCATT', positional_metadata={'quality':range(6)})
        >>> seq
        DNA
        -----------------------------
        Positional metadata:
            'quality': <dtype: int64>
        Stats:
            length: 6
            has gaps: False
            has degenerates: False
            has non-degenerates: True
            GC-content: 16.67%
        -----------------------------
        0 TTCATT
        >>> seq.complement()
        DNA
        -----------------------------
        Positional metadata:
            'quality': <dtype: int64>
        Stats:
            length: 6
            has gaps: False
            has degenerates: False
            has non-degenerates: True
            GC-content: 16.67%
        -----------------------------
        0 AAGTAA
        >>> rc = seq.complement(reverse=True)
        >>> rc
        DNA
        -----------------------------
        Positional metadata:
            'quality': <dtype: int64>
        Stats:
            length: 6
            has gaps: False
            has degenerates: False
            has non-degenerates: True
            GC-content: 16.67%
        -----------------------------
        0 AATGAA
        >>> rc.positional_metadata['quality'].values
        array([5, 4, 3, 2, 1, 0])

        """
        # Vectorized byte translation through the per-class lookup table.
        result = self._complement_lookup[self._bytes]
        complement = self._to(sequence=result)
        if reverse:
            # Slicing reverses positional metadata along with the sequence.
            complement = complement[::-1]
        return complement

    def reverse_complement(self):
        """Return the reverse complement of the nucleotide sequence.

        Returns
        -------
        NucleotideMixin
            The reverse complement of the nucleotide sequence. The type and
            metadata of the result will be the same as the nucleotide
            sequence. If positional metadata is present, it will be reversed.

        See Also
        --------
        complement
        is_reverse_complement

        Notes
        -----
        This method is equivalent to ``self.complement(reverse=True)``.

        Examples
        --------
        >>> from skbio import DNA
        >>> seq = DNA('TTCATT',
        ...           positional_metadata={'quality':range(6)})
        >>> seq = seq.reverse_complement()
        >>> seq
        DNA
        -----------------------------
        Positional metadata:
            'quality': <dtype: int64>
        Stats:
            length: 6
            has gaps: False
            has degenerates: False
            has non-degenerates: True
            GC-content: 16.67%
        -----------------------------
        0 AATGAA
        >>> seq.positional_metadata['quality'].values
        array([5, 4, 3, 2, 1, 0])


        """
        return self.complement(reverse=True)

    def is_reverse_complement(self, other):
        """Determine if a sequence is the reverse complement of this sequence.

        Parameters
        ----------
        other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
            Sequence to compare to.

        Returns
        -------
        bool
            ``True`` if `other` is the reverse complement of the nucleotide
            sequence.

        Raises
        ------
        TypeError
            If `other` is a ``Sequence`` object with a different type than the
            nucleotide sequence.

        See Also
        --------
        reverse_complement

        Examples
        --------
        >>> from skbio import DNA
        >>> DNA('TTCATT').is_reverse_complement('AATGAA')
        True
        >>> DNA('TTCATT').is_reverse_complement('AATGTT')
        False
        >>> DNA('ACGT').is_reverse_complement('ACGT')
        True

        """
        other = self._munge_to_sequence(other, 'is_reverse_complement')

        # avoid computing the reverse complement if possible
        if len(self) != len(other):
            return False
        else:
            # we reverse complement ourselves because `other` is a `Sequence`
            # object at this point and we only care about comparing the
            # underlying sequence data
            return self.reverse_complement()._string == other._string

    def gc_content(self):
        """Calculate the relative frequency of G's and C's in the sequence.

        This includes G, C, and S characters. This is equivalent to calling
        ``gc_frequency(relative=True)``. Note that the sequence will be
        degapped before the operation, so gap characters will not be included
        when calculating the length of the sequence.

        Returns
        -------
        float
            Relative frequency of G's and C's in the sequence.

        See Also
        --------
        gc_frequency

        Examples
        --------
        >>> from skbio import DNA
        >>> DNA('ACGT').gc_content()
        0.5
        >>> DNA('ACGTACGT').gc_content()
        0.5
        >>> DNA('ACTTAGTT').gc_content()
        0.25
        >>> DNA('ACGT--..').gc_content()
        0.5
        >>> DNA('--..').gc_content()
        0

        `S` means `G` or `C`, so it counts:

        >>> DNA('ASST').gc_content()
        0.5

        Other degenerates don't count:

        >>> DNA('RYKMBDHVN').gc_content()
        0.0

        """
        return self.gc_frequency(relative=True)

    def gc_frequency(self, relative=False):
        """Calculate frequency of G's and C's in the sequence.

        This calculates the minimum GC frequency, which corresponds to IUPAC
        characters G, C, and S (which stands for G or C).

        Parameters
        ----------
        relative : bool, optional
            If False return the frequency of G, C, and S characters (ie the
            count). If True return the relative frequency, ie the proportion
            of G, C, and S characters in the sequence. In this case the
            sequence will also be degapped before the operation, so gap
            characters will not be included when calculating the length of the
            sequence.

        Returns
        -------
        int or float
            Either frequency (count) or relative frequency (proportion),
            depending on `relative`.

        See Also
        --------
        gc_content

        Examples
        --------
        >>> from skbio import DNA
        >>> DNA('ACGT').gc_frequency()
        2
        >>> DNA('ACGT').gc_frequency(relative=True)
        0.5
        >>> DNA('ACGT--..').gc_frequency(relative=True)
        0.5
        >>> DNA('--..').gc_frequency(relative=True)
        0

        `S` means `G` or `C`, so it counts:

        >>> DNA('ASST').gc_frequency()
        2

        Other degenerates don't count:

        >>> DNA('RYKMBDHVN').gc_frequency()
        0

        """

        # Count every byte value once, then sum the counts of G, C, and S.
        counts = np.bincount(self._bytes,
                             minlength=self._number_of_extended_ascii_codes)
        gc = counts[self._gc_codes].sum()
        if relative:
            # Gaps are excluded from the denominator; true division (via the
            # __future__ import at the top of the file) yields a float.
            seq = self.degap()
            if len(seq) != 0:
                gc /= len(seq)
        return gc
+
+
# Nucleotide sequences extend the motif registry inherited from
# IUPACSequence with purine/pyrimidine run detection.
_motifs = parent_motifs.copy()


@_motifs("purine-run")
def _motif_purine_run(sequence, min_length, ignore):
    """Locate runs of purines (``A``, ``G``, or the degenerate ``R``)."""
    pattern = "([AGR]{%d,})" % min_length
    return sequence.find_with_regex(pattern, ignore=ignore)
+
+
@_motifs("pyrimidine-run")
def _motif_pyrimidine_run(sequence, min_length, ignore):
    """Locate runs of pyrimidines (``C``, ``T``/``U``, or the degenerate ``Y``)."""
    pattern = "([CTUY]{%d,})" % min_length
    return sequence.find_with_regex(pattern, ignore=ignore)
diff --git a/skbio/sequence/_protein.py b/skbio/sequence/_protein.py
new file mode 100644
index 0000000..e1f7863
--- /dev/null
+++ b/skbio/sequence/_protein.py
@@ -0,0 +1,215 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import numpy as np
+
+from skbio.util._decorator import classproperty, overrides
+from skbio.util._decorator import stable
+from ._iupac_sequence import IUPACSequence, _motifs as parent_motifs
+
+
class Protein(IUPACSequence):
    """Store protein sequence data and optional associated metadata.

    Only characters in the IUPAC protein character set [1]_ are supported.

    Parameters
    ----------
    sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
        Characters representing the protein sequence itself.
    metadata : dict, optional
        Arbitrary metadata which applies to the entire sequence.
    positional_metadata : Pandas DataFrame consumable, optional
        Arbitrary per-character metadata. For example, quality data from
        sequencing reads. Must be able to be passed directly to the Pandas
        DataFrame constructor.
    validate : bool, optional
        If ``True``, validation will be performed to ensure that all sequence
        characters are in the IUPAC protein character set. If ``False``,
        validation will not be performed. Turning off validation will improve
        runtime performance. If invalid characters are present, however, there
        is **no guarantee that operations performed on the resulting object
        will work or behave as expected.** Only turn off validation if you are
        certain that the sequence characters are valid. To store sequence data
        that is not IUPAC-compliant, use ``Sequence``.
    lowercase : bool or str, optional
        If ``True``, lowercase sequence characters will be converted to
        uppercase characters in order to be valid IUPAC Protein characters. If
        ``False``, no characters will be converted. If a str, it will be
        treated as a key into the positional metadata of the object. All
        lowercase characters will be converted to uppercase, and a ``True``
        value will be stored in a boolean array in the positional metadata
        under the key.


    Attributes
    ----------
    values
    metadata
    positional_metadata
    alphabet
    gap_chars
    stop_chars
    nondegenerate_chars
    degenerate_chars
    degenerate_map

    References
    ----------
    .. [1] Nomenclature for incompletely specified bases in nucleic acid
       sequences: recommendations 1984.
       Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
       A Cornish-Bowden

    Examples
    --------
    >>> from skbio import Protein
    >>> Protein('PAW')
    Protein
    -----------------------------
    Stats:
        length: 3
        has gaps: False
        has degenerates: False
        has non-degenerates: True
        has stops: False
    -----------------------------
    0 PAW

    Convert lowercase characters to uppercase:

    >>> Protein('paW', lowercase=True)
    Protein
    -----------------------------
    Stats:
        length: 3
        has gaps: False
        has degenerates: False
        has non-degenerates: True
        has stops: False
    -----------------------------
    0 PAW

    """
    # NOTE(review): reference [1] above cites nucleic acid nomenclature; for
    # a protein alphabet the IUPAC amino acid nomenclature recommendations
    # are presumably intended -- confirm upstream.
    # Lazily-built cache of stop-character ASCII codes (see ``_stop_codes``).
    __stop_codes = None

    @classproperty
    def _stop_codes(cls):
        # ASCII codes of ``stop_chars``, computed on first access and cached
        # on the class.
        if cls.__stop_codes is None:
            stops = cls.stop_chars
            cls.__stop_codes = np.asarray([ord(s) for s in stops])
        return cls.__stop_codes

    @classproperty
    @stable(as_of="0.4.0")
    @overrides(IUPACSequence)
    def alphabet(cls):
        # Extend the inherited IUPAC alphabet with the stop character(s).
        return super(Protein, cls).alphabet | cls.stop_chars

    @classproperty
    @stable(as_of="0.4.0")
    @overrides(IUPACSequence)
    def nondegenerate_chars(cls):
        return set("ACDEFGHIKLMNPQRSTVWY")

    @classproperty
    @stable(as_of="0.4.0")
    @overrides(IUPACSequence)
    def degenerate_map(cls):
        # B = D or N, Z = E or Q, X = any amino acid.
        return {
            "B": set("DN"), "Z": set("EQ"),
            "X": set("ACDEFGHIKLMNPQRSTVWY")
        }

    @classproperty
    @stable(as_of="0.4.0")
    def stop_chars(cls):
        """Return characters representing translation stop codons.

        Returns
        -------
        set
            Characters representing translation stop codons.

        """
        return set('*')

    @property
    def _motifs(self):
        # Protein-specific motif registry (module-level ``_motifs`` below).
        return _motifs

    @stable(as_of="0.4.0")
    def stops(self):
        """Find positions containing stop characters in the protein sequence.

        Returns
        -------
        1D np.ndarray (bool)
            Boolean vector where ``True`` indicates a stop character is present
            at that position in the protein sequence.

        See Also
        --------
        has_stops

        Examples
        --------
        >>> from skbio import Protein
        >>> s = Protein('PAW')
        >>> s.stops()
        array([False, False, False], dtype=bool)
        >>> s = Protein('PAW*E*')
        >>> s.stops()
        array([False, False, False,  True, False,  True], dtype=bool)

        """
        # Vectorized membership test of each byte against the stop codes.
        return np.in1d(self._bytes, self._stop_codes)

    @stable(as_of="0.4.0")
    def has_stops(self):
        """Determine if the sequence contains one or more stop characters.

        Returns
        -------
        bool
            Indicates whether there are one or more occurrences of stop
            characters in the protein sequence.

        Examples
        --------
        >>> from skbio import Protein
        >>> s = Protein('PAW')
        >>> s.has_stops()
        False
        >>> s = Protein('PAW*E*')
        >>> s.has_stops()
        True

        """
        return bool(self.stops().any())

    @overrides(IUPACSequence)
    def _repr_stats(self):
        """Define custom statistics to display in the sequence's repr."""
        stats = super(Protein, self)._repr_stats()
        stats.append(('has stops', '%r' % self.has_stops()))
        return stats
+
+
# Proteins extend the motif registry inherited from IUPACSequence.
_motifs = parent_motifs.copy()


@_motifs("N-glycosylation")
def _motif_nitro_glycosylation(sequence, min_length, ignore):
    """Locate N-glycosylation sites: N, then not P/X, then S or T, then not P/X."""
    # The motif has a fixed length of four residues, so `min_length` (part of
    # the registry call signature) is not used in the pattern.
    return sequence.find_with_regex("(N[^PX][ST][^PX])", ignore=ignore)

# Leave this at the bottom
_motifs.interpolate(Protein, "find_motifs")
diff --git a/skbio/sequence/_rna.py b/skbio/sequence/_rna.py
new file mode 100644
index 0000000..e05d0a2
--- /dev/null
+++ b/skbio/sequence/_rna.py
@@ -0,0 +1,351 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import skbio
+from skbio.util._decorator import classproperty, overrides
+from skbio.util._decorator import stable
+from ._nucleotide_mixin import NucleotideMixin, _motifs as _parent_motifs
+from ._iupac_sequence import IUPACSequence
+
+
class RNA(IUPACSequence, NucleotideMixin):
    """Store RNA sequence data and optional associated metadata.

    Only characters in the IUPAC RNA character set [1]_ are supported.

    Parameters
    ----------
    sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
        Characters representing the RNA sequence itself.
    metadata : dict, optional
        Arbitrary metadata which applies to the entire sequence.
    positional_metadata : Pandas DataFrame consumable, optional
        Arbitrary per-character metadata. For example, quality data from
        sequencing reads. Must be able to be passed directly to the Pandas
        DataFrame constructor.
    validate : bool, optional
        If ``True``, validation will be performed to ensure that all sequence
        characters are in the IUPAC RNA character set. If ``False``, validation
        will not be performed. Turning off validation will improve runtime
        performance. If invalid characters are present, however, there is
        **no guarantee that operations performed on the resulting object will
        work or behave as expected.** Only turn off validation if you are
        certain that the sequence characters are valid. To store sequence data
        that is not IUPAC-compliant, use ``Sequence``.
    lowercase : bool or str, optional
        If ``True``, lowercase sequence characters will be converted to
        uppercase characters in order to be valid IUPAC RNA characters. If
        ``False``, no characters will be converted. If a str, it will be
        treated as a key into the positional metadata of the object. All
        lowercase characters will be converted to uppercase, and a ``True``
        value will be stored in a boolean array in the positional metadata
        under the key.


    Attributes
    ----------
    values
    metadata
    positional_metadata
    alphabet
    gap_chars
    nondegenerate_chars
    degenerate_chars
    degenerate_map
    complement_map

    See Also
    --------
    DNA

    References
    ----------
    .. [1] Nomenclature for incompletely specified bases in nucleic acid
       sequences: recommendations 1984.
       Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
       A Cornish-Bowden

    Examples
    --------
    >>> from skbio import RNA
    >>> RNA('ACCGAAU')
    RNA
    -----------------------------
    Stats:
        length: 7
        has gaps: False
        has degenerates: False
        has non-degenerates: True
        GC-content: 42.86%
    -----------------------------
    0 ACCGAAU

    Convert lowercase characters to uppercase:

    >>> RNA('AcCGaaU', lowercase=True)
    RNA
    -----------------------------
    Stats:
        length: 7
        has gaps: False
        has degenerates: False
        has non-degenerates: True
        GC-content: 42.86%
    -----------------------------
    0 ACCGAAU

    """

    @classproperty
    @stable(as_of="0.4.0")
    @overrides(NucleotideMixin)
    def complement_map(cls):
        comp_map = {
            'A': 'U', 'U': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y',
            'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H',
            'H': 'D', 'V': 'B', 'N': 'N'
        }

        # Gap characters are defined as their own complement.
        comp_map.update({c: c for c in cls.gap_chars})
        return comp_map

    @classproperty
    @stable(as_of="0.4.0")
    @overrides(IUPACSequence)
    def nondegenerate_chars(cls):
        return set("ACGU")

    @classproperty
    @stable(as_of="0.4.0")
    @overrides(IUPACSequence)
    def degenerate_map(cls):
        return {
            "R": set("AG"), "Y": set("CU"), "M": set("AC"), "K": set("UG"),
            "W": set("AU"), "S": set("GC"), "B": set("CGU"), "D": set("AGU"),
            "H": set("ACU"), "V": set("ACG"), "N": set("ACGU")
        }

    @property
    def _motifs(self):
        # RNA-specific motif registry (module-level ``_motifs`` below).
        return _motifs

    @stable(as_of="0.4.0")
    def translate(self, genetic_code=1, *args, **kwargs):
        """Translate RNA sequence into protein sequence.

        Parameters
        ----------
        genetic_code : int, GeneticCode, optional
            Genetic code to use in translation. If ``int``, used as a table ID
            to look up the corresponding NCBI genetic code.
        args : tuple
            Positional arguments accepted by ``GeneticCode.translate``.
        kwargs : dict
            Keyword arguments accepted by ``GeneticCode.translate``.

        Returns
        -------
        Protein
            Translated sequence.

        See Also
        --------
        GeneticCode.translate
        GeneticCode.from_ncbi
        translate_six_frames

        Notes
        -----
        RNA sequence's metadata are included in the translated protein
        sequence. Positional metadata are not included.

        Examples
        --------
        Translate RNA into protein using NCBI's standard genetic code (table ID
        1, the default genetic code in scikit-bio):

        >>> from skbio import RNA
        >>> rna = RNA('AUGCCACUUUAA')
        >>> rna.translate()
        Protein
        -----------------------------
        Stats:
            length: 4
            has gaps: False
            has degenerates: False
            has non-degenerates: True
            has stops: True
        -----------------------------
        0 MPL*

        Translate the same RNA sequence using a different NCBI genetic code
        (table ID 3, the yeast mitochondrial code) and specify that translation
        must terminate at the first stop codon:

        >>> rna.translate(3, stop='require')
        Protein
        -----------------------------
        Stats:
            length: 3
            has gaps: False
            has degenerates: False
            has non-degenerates: True
            has stops: False
        -----------------------------
        0 MPT

        """
        # Accept either an NCBI table ID or a ready-made GeneticCode object.
        if not isinstance(genetic_code, skbio.GeneticCode):
            genetic_code = skbio.GeneticCode.from_ncbi(genetic_code)
        return genetic_code.translate(self, *args, **kwargs)

    @stable(as_of="0.4.0")
    def translate_six_frames(self, genetic_code=1, *args, **kwargs):
        """Translate RNA into protein using six possible reading frames.

        The six possible reading frames are:

        * 1 (forward)
        * 2 (forward)
        * 3 (forward)
        * -1 (reverse)
        * -2 (reverse)
        * -3 (reverse)

        Translated sequences are yielded in this order.

        Parameters
        ----------
        genetic_code : int, GeneticCode, optional
            Genetic code to use in translation. If ``int``, used as a table ID
            to look up the corresponding NCBI genetic code.
        args : tuple
            Positional arguments accepted by
            ``GeneticCode.translate_six_frames``.
        kwargs : dict
            Keyword arguments accepted by ``GeneticCode.translate_six_frames``.

        Yields
        ------
        Protein
            Translated sequence in the current reading frame.

        See Also
        --------
        GeneticCode.translate_six_frames
        GeneticCode.from_ncbi
        translate

        Notes
        -----
        This method is faster than (and equivalent to) performing six
        independent translations using, for example:

        ``(seq.translate(reading_frame=rf)
        for rf in GeneticCode.reading_frames)``

        RNA sequence's metadata are included in each translated protein
        sequence. Positional metadata are not included.

        Examples
        --------
        Translate RNA into protein using the six possible reading frames and
        NCBI's standard genetic code (table ID 1, the default genetic code in
        scikit-bio):

        >>> from skbio import RNA
        >>> rna = RNA('AUGCCACUUUAA')
        >>> for protein in rna.translate_six_frames():
        ...     protein
        ...     print('')
        Protein
        -----------------------------
        Stats:
            length: 4
            has gaps: False
            has degenerates: False
            has non-degenerates: True
            has stops: True
        -----------------------------
        0 MPL*
        <BLANKLINE>
        Protein
        -----------------------------
        Stats:
            length: 3
            has gaps: False
            has degenerates: False
            has non-degenerates: True
            has stops: False
        -----------------------------
        0 CHF
        <BLANKLINE>
        Protein
        -----------------------------
        Stats:
            length: 3
            has gaps: False
            has degenerates: False
            has non-degenerates: True
            has stops: False
        -----------------------------
        0 ATL
        <BLANKLINE>
        Protein
        -----------------------------
        Stats:
            length: 4
            has gaps: False
            has degenerates: False
            has non-degenerates: True
            has stops: False
        -----------------------------
        0 LKWH
        <BLANKLINE>
        Protein
        -----------------------------
        Stats:
            length: 3
            has gaps: False
            has degenerates: False
            has non-degenerates: True
            has stops: True
        -----------------------------
        0 *SG
        <BLANKLINE>
        Protein
        -----------------------------
        Stats:
            length: 3
            has gaps: False
            has degenerates: False
            has non-degenerates: True
            has stops: False
        -----------------------------
        0 KVA
        <BLANKLINE>

        """
        # Accept either an NCBI table ID or a ready-made GeneticCode object.
        if not isinstance(genetic_code, skbio.GeneticCode):
            genetic_code = skbio.GeneticCode.from_ncbi(genetic_code)
        return genetic_code.translate_six_frames(self, *args, **kwargs)

    @overrides(IUPACSequence)
    def _repr_stats(self):
        """Define custom statistics to display in the sequence's repr."""
        stats = super(RNA, self)._repr_stats()
        stats.append(('GC-content', '{:.2%}'.format(self.gc_content())))
        return stats
+
+
# RNA reuses the nucleotide motif registry (purine/pyrimidine runs) without
# adding RNA-specific motifs; the copy keeps future additions isolated.
_motifs = _parent_motifs.copy()

# Leave this at the bottom
# NOTE(review): ``interpolate`` presumably injects the registered motif
# names into ``RNA.find_motifs``'s docstring -- confirm in MiniRegistry.
_motifs.interpolate(RNA, "find_motifs")
diff --git a/skbio/sequence/_sequence.py b/skbio/sequence/_sequence.py
index 52baffe..73ffe70 100644
--- a/skbio/sequence/_sequence.py
+++ b/skbio/sequence/_sequence.py
@@ -9,76 +9,65 @@
 from __future__ import absolute_import, division, print_function
 from future.builtins import range
 from future.utils import viewitems
-from six import string_types
+import six
 
+import itertools
+import math
 import re
-import warnings
-from collections import Sequence, Counter, defaultdict
-from itertools import product
+import collections
+import copy
+import numbers
+import textwrap
+from contextlib import contextmanager
 
 import numpy as np
 from scipy.spatial.distance import hamming
 
+import pandas as pd
+
 from skbio._base import SkbioObject
-from skbio.sequence import BiologicalSequenceError
+from skbio.sequence._base import ElasticLines
+from skbio.util._misc import chunk_str
+from skbio.util._decorator import stable, experimental
+
 
+class Sequence(collections.Sequence, SkbioObject):
+    """Store biological sequence data and optional associated metadata.
 
-class BiologicalSequence(Sequence, SkbioObject):
-    """Base class for biological sequences.
+    ``Sequence`` objects do not enforce an alphabet and are thus the most
+    generic objects for storing biological sequence data. Subclasses ``DNA``,
+    ``RNA``, and ``Protein`` enforce the IUPAC character set [1]_ for, and
+    provide operations specific to, each respective molecule type.
+
+    ``Sequence`` objects consist of the underlying sequence data, as well
+    as optional metadata and positional metadata. The underlying sequence
+    is immutable, while the metadata and positional metadata are mutable.
 
     Parameters
     ----------
-    sequence : python Sequence (e.g., str, list or tuple)
-        The biological sequence.
-    id : str, optional
-        The sequence id (e.g., an accession number).
-    description : str, optional
-        A description or comment about the sequence (e.g., "green
-        fluorescent protein").
-    quality : 1-D array_like, int, optional
-        Phred quality scores stored as nonnegative integers, one per sequence
-        character. If provided, must be the same length as the biological
-        sequence. Can be a 1-D ``numpy.ndarray`` of integers, or a structure
-        that can be converted to this representation using ``numpy.asarray``. A
-        copy will *not* be made if `quality` is already a 1-D ``numpy.ndarray``
-        with an ``int`` ``dtype``. The array will be made read-only (i.e., its
-        ``WRITEABLE`` flag will be set to ``False``).
-    validate : bool, optional
-        If True, runs the `is_valid` method after construction and raises
-        BiologicalSequenceError if ``is_valid == False``.
+    sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
+        Characters representing the biological sequence itself.
+    metadata : dict, optional
+        Arbitrary metadata which applies to the entire sequence. A shallow copy
+        of the ``dict`` will be made (see Examples section below for details).
+    positional_metadata : pd.DataFrame consumable, optional
+        Arbitrary per-character metadata (e.g., sequence read quality
+        scores). Must be able to be passed directly to ``pd.DataFrame``
+        constructor. Each column of metadata must be the same length as the
+        biological sequence. A shallow copy of the positional metadata will be
+        made if necessary (see Examples section below for details).
 
     Attributes
     ----------
-    sequence
-    id
-    description
-    quality
-
-    Raises
-    ------
-    skbio.sequence.BiologicalSequenceError
-        If ``validate == True`` and ``is_valid == False``, or if `quality` is
-        not the correct shape.
+    values
+    metadata
+    positional_metadata
 
     See Also
     --------
-    NucleotideSequence
-    DNASequence
-    RNASequence
-
-    Notes
-    -----
-    `BiologicalSequence` objects are immutable. Where applicable, methods
-    return a new object of the same class.
-    Subclasses are typically defined by methods relevant to only a specific
-    type of biological sequence, and by containing characters only contained in
-    the IUPAC standard character set [1]_ for that molecule type.
-
-    Examples
-    --------
-    >>> from skbio.sequence import BiologicalSequence
-    >>> s = BiologicalSequence('GGUCGUGAAGGA')
-    >>> t = BiologicalSequence('GGUCCUGAAGGU')
+    DNA
+    RNA
+    Protein
 
     References
     ----------
@@ -87,1883 +76,2217 @@ class BiologicalSequence(Sequence, SkbioObject):
        Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
        A Cornish-Bowden
 
+    Examples
+    --------
+    >>> from pprint import pprint
+    >>> from skbio import Sequence
+
+    **Creating sequences:**
+
+    Create a sequence without any metadata:
+
+    >>> seq = Sequence('GGUCGUGAAGGA')
+    >>> seq
+    Sequence
+    ---------------
+    Stats:
+        length: 12
+    ---------------
+    0 GGUCGUGAAG GA
+
+    Create a sequence with metadata and positional metadata:
+
+    >>> metadata = {'id':'seq-id', 'desc':'seq desc', 'authors': ['Alice']}
+    >>> positional_metadata = {'quality': [3, 3, 4, 10],
+    ...                        'exons': [True, True, False, True]}
+    >>> seq = Sequence('ACGT', metadata=metadata,
+    ...                positional_metadata=positional_metadata)
+    >>> seq
+    Sequence
+    -----------------------------
+    Metadata:
+        'authors': <type 'list'>
+        'desc': 'seq desc'
+        'id': 'seq-id'
+    Positional metadata:
+        'exons': <dtype: bool>
+        'quality': <dtype: int64>
+    Stats:
+        length: 4
+    -----------------------------
+    0 ACGT
+
+    **Retrieving underlying sequence data:**
+
+    Retrieve underlying sequence:
+
+    >>> seq.values # doctest: +NORMALIZE_WHITESPACE
+    array(['A', 'C', 'G', 'T'],
+          dtype='|S1')
+
+    The underlying sequence is immutable:
+
+    >>> seq.values = np.array(['T', 'C', 'G', 'A'], dtype='|S1')
+    Traceback (most recent call last):
+        ...
+    AttributeError: can't set attribute
+
+    >>> seq.values[0] = 'T'
+    Traceback (most recent call last):
+        ...
+    ValueError: assignment destination is read-only
+
+    **Retrieving sequence metadata:**
+
+    Retrieve metadata:
+
+    >>> pprint(seq.metadata) # using pprint to display dict in sorted order
+    {'authors': ['Alice'], 'desc': 'seq desc', 'id': 'seq-id'}
+
+    Retrieve positional metadata:
+
+    >>> seq.positional_metadata
+       exons  quality
+    0   True        3
+    1   True        3
+    2  False        4
+    3   True       10
+
+    **Updating sequence metadata:**
+
+    .. warning:: Be aware that a shallow copy of ``metadata`` and
+       ``positional_metadata`` is made for performance. Since a deep copy is
+       not made, changes made to mutable Python objects stored as metadata may
+       affect the metadata of other ``Sequence`` objects or anything else that
+       shares a reference to the object. The following examples illustrate this
+       behavior.
+
+    First, let's create a sequence and update its metadata:
+
+    >>> metadata = {'id':'seq-id', 'desc':'seq desc', 'authors': ['Alice']}
+    >>> seq = Sequence('ACGT', metadata=metadata)
+    >>> seq.metadata['id'] = 'new-id'
+    >>> seq.metadata['pubmed'] = 12345
+    >>> pprint(seq.metadata)
+    {'authors': ['Alice'], 'desc': 'seq desc', 'id': 'new-id', 'pubmed': 12345}
+
+    Note that the original metadata dictionary (stored in variable
+    ``metadata``) hasn't changed because a shallow copy was made:
+
+    >>> pprint(metadata)
+    {'authors': ['Alice'], 'desc': 'seq desc', 'id': 'seq-id'}
+    >>> seq.metadata == metadata
+    False
+
+    Note however that since only a *shallow* copy was made, updates to mutable
+    objects will also change the original metadata dictionary:
+
+    >>> seq.metadata['authors'].append('Bob')
+    >>> seq.metadata['authors']
+    ['Alice', 'Bob']
+    >>> metadata['authors']
+    ['Alice', 'Bob']
+
+    This behavior can also occur when manipulating a sequence that has been
+    derived from another sequence:
+
+    >>> subseq = seq[1:3]
+    >>> subseq
+    Sequence
+    ----------------------------
+    Metadata:
+        'authors': <type 'list'>
+        'desc': 'seq desc'
+        'id': 'new-id'
+        'pubmed': 12345
+    Stats:
+        length: 2
+    ----------------------------
+    0 CG
+    >>> pprint(subseq.metadata)
+    {'authors': ['Alice', 'Bob'],
+     'desc': 'seq desc',
+     'id': 'new-id',
+     'pubmed': 12345}
+
+    The subsequence has inherited the metadata of its parent sequence. If we
+    update the subsequence's author list, we see the changes propagated in the
+    parent sequence and original metadata dictionary:
+
+    >>> subseq.metadata['authors'].append('Carol')
+    >>> subseq.metadata['authors']
+    ['Alice', 'Bob', 'Carol']
+    >>> seq.metadata['authors']
+    ['Alice', 'Bob', 'Carol']
+    >>> metadata['authors']
+    ['Alice', 'Bob', 'Carol']
+
+    The behavior for updating positional metadata is similar. Let's create a
+    new sequence with positional metadata that is already stored in a
+    ``pd.DataFrame``:
+
+    >>> positional_metadata = pd.DataFrame(
+    ...     {'quality': [3, 3, 4, 10], 'list': [[], [], [], []]})
+    >>> seq = Sequence('ACGT', positional_metadata=positional_metadata)
+    >>> seq
+    Sequence
+    -----------------------------
+    Positional metadata:
+        'list': <dtype: object>
+        'quality': <dtype: int64>
+    Stats:
+        length: 4
+    -----------------------------
+    0 ACGT
+    >>> seq.positional_metadata
+      list  quality
+    0   []        3
+    1   []        3
+    2   []        4
+    3   []       10
+
+    Now let's update the sequence's positional metadata by adding a new column
+    and changing a value in another column:
+
+    >>> seq.positional_metadata['gaps'] = [False, False, False, False]
+    >>> seq.positional_metadata.loc[0, 'quality'] = 999
+    >>> seq.positional_metadata
+      list  quality   gaps
+    0   []      999  False
+    1   []        3  False
+    2   []        4  False
+    3   []       10  False
+
+    Note that the original positional metadata (stored in variable
+    ``positional_metadata``) hasn't changed because a shallow copy was made:
+
+    >>> positional_metadata
+      list  quality
+    0   []        3
+    1   []        3
+    2   []        4
+    3   []       10
+    >>> seq.positional_metadata.equals(positional_metadata)
+    False
+
+    Next let's create a sequence that has been derived from another sequence:
+
+    >>> subseq = seq[1:3]
+    >>> subseq
+    Sequence
+    -----------------------------
+    Positional metadata:
+        'list': <dtype: object>
+        'quality': <dtype: int64>
+        'gaps': <dtype: bool>
+    Stats:
+        length: 2
+    -----------------------------
+    0 CG
+    >>> subseq.positional_metadata
+      list  quality   gaps
+    0   []        3  False
+    1   []        4  False
+
+    As described above for metadata, since only a *shallow* copy was made of
+    the positional metadata, updates to mutable objects will also change the
+    parent sequence's positional metadata and the original positional metadata
+    ``pd.DataFrame``:
+
+    >>> subseq.positional_metadata.loc[0, 'list'].append('item')
+    >>> subseq.positional_metadata
+         list  quality   gaps
+    0  [item]        3  False
+    1      []        4  False
+    >>> seq.positional_metadata
+         list  quality   gaps
+    0      []      999  False
+    1  [item]        3  False
+    2      []        4  False
+    3      []       10  False
+    >>> positional_metadata
+         list  quality
+    0      []        3
+    1  [item]        3
+    2      []        4
+    3      []       10
+
     """
     default_write_format = 'fasta'
+    __hash__ = None
 
-    @classmethod
-    def alphabet(cls):
-        """Return the set of characters allowed in a `BiologicalSequence`.
+    @property
+    @stable(as_of="0.4.0")
+    def values(self):
+        """Array containing underlying sequence characters.
 
-        Returns
-        -------
-        set
-            Characters that are allowed in a valid `BiologicalSequence`.
+        Notes
+        -----
+        This property is not writeable.
 
-        See Also
+        Examples
         --------
-        is_valid
-        gap_alphabet
-        unsupported_characters
-        has_unsupported_characters
+        >>> from skbio import Sequence
+        >>> s = Sequence('AACGA')
+        >>> s.values # doctest: +NORMALIZE_WHITESPACE
+        array(['A', 'A', 'C', 'G', 'A'],
+              dtype='|S1')
 
         """
-        return cls.iupac_characters()
+        return self._bytes.view('|S1')
 
-    @classmethod
-    def gap_alphabet(cls):
-        """Return the set of characters defined as gaps.
+    @property
+    @stable(as_of="0.4.0")
+    def metadata(self):
+        """``dict`` containing metadata which applies to the entire sequence.
 
-        Returns
-        -------
-        set
-            Characters defined as gaps in a `BiologicalSequence`
+        Notes
+        -----
+        This property can be set and deleted.
 
-        See Also
+        Examples
         --------
-        alphabet
-        unsupported_characters
-        has_unsupported_characters
-        degap
-        gap_maps
-        gap_vector
+        >>> from pprint import pprint
+        >>> from skbio import Sequence
 
-        """
-        return set('-.')
+        Create a sequence with metadata:
 
-    @classmethod
-    def iupac_degenerate_characters(cls):
-        """Return the degenerate IUPAC characters.
+        >>> s = Sequence('ACGTACGTACGTACGT',
+        ...              metadata={'id': 'seq-id',
+        ...                        'description': 'seq description'})
+        >>> s
+        Sequence
+        ------------------------------------
+        Metadata:
+            'description': 'seq description'
+            'id': 'seq-id'
+        Stats:
+            length: 16
+        ------------------------------------
+        0 ACGTACGTAC GTACGT
 
-        Returns
-        -------
-        set
-            Degenerate IUPAC characters.
+        Retrieve metadata:
 
-        """
-        return set(cls.iupac_degeneracies())
+        >>> pprint(s.metadata) # using pprint to display dict in sorted order
+        {'description': 'seq description', 'id': 'seq-id'}
 
-    @classmethod
-    def iupac_characters(cls):
-        """Return the non-degenerate and degenerate characters.
+        Update metadata:
 
-        Returns
-        -------
-        set
-            Non-degenerate and degenerate characters.
+        >>> s.metadata['id'] = 'new-id'
+        >>> s.metadata['pubmed'] = 12345
+        >>> pprint(s.metadata)
+        {'description': 'seq description', 'id': 'new-id', 'pubmed': 12345}
 
-        """
-        return (cls.iupac_standard_characters() |
-                cls.iupac_degenerate_characters())
+        Set metadata:
 
-    @classmethod
-    def iupac_standard_characters(cls):
-        """Return the non-degenerate IUPAC characters.
+        >>> s.metadata = {'abc': 123}
+        >>> s.metadata
+        {'abc': 123}
 
-        Returns
-        -------
-        set
-            Non-degenerate IUPAC characters.
+        Delete metadata:
+
+        >>> s.has_metadata()
+        True
+        >>> del s.metadata
+        >>> s.metadata
+        {}
+        >>> s.has_metadata()
+        False
 
         """
-        return set()
+        if self._metadata is None:
+            # not using setter to avoid copy
+            self._metadata = {}
+        return self._metadata
+
+    @metadata.setter
+    def metadata(self, metadata):
+        if not isinstance(metadata, dict):
+            raise TypeError("metadata must be a dict")
+        # shallow copy
+        self._metadata = metadata.copy()
+
+    @metadata.deleter
+    def metadata(self):
+        self._metadata = None
 
-    @classmethod
-    def iupac_degeneracies(cls):
-        """Return the mapping of degenerate to non-degenerate characters.
+    @property
+    @stable(as_of="0.4.0")
+    def positional_metadata(self):
+        """``pd.DataFrame`` containing metadata on a per-character basis.
 
-        Returns
-        -------
-        dict of sets
-            Mapping of IUPAC degenerate character to the set of
-            non-degenerate IUPAC characters it represents.
+        Notes
+        -----
+        This property can be set and deleted.
+
+        Examples
+        --------
+        Create a DNA sequence with positional metadata:
+
+        >>> from skbio import DNA
+        >>> seq = DNA(
+        ...     'ACGT',
+        ...     positional_metadata={'quality': [3, 3, 20, 11],
+        ...                          'exons': [True, True, False, True]})
+        >>> seq
+        DNA
+        -----------------------------
+        Positional metadata:
+            'exons': <dtype: bool>
+            'quality': <dtype: int64>
+        Stats:
+            length: 4
+            has gaps: False
+            has degenerates: False
+            has non-degenerates: True
+            GC-content: 50.00%
+        -----------------------------
+        0 ACGT
+
+        Retrieve positional metadata:
+
+        >>> seq.positional_metadata
+           exons  quality
+        0   True        3
+        1   True        3
+        2  False       20
+        3   True       11
+
+        Update positional metadata:
+
+        >>> seq.positional_metadata['gaps'] = seq.gaps()
+        >>> seq.positional_metadata
+           exons  quality   gaps
+        0   True        3  False
+        1   True        3  False
+        2  False       20  False
+        3   True       11  False
+
+        Set positional metadata:
+
+        >>> seq.positional_metadata = {'degenerates': seq.degenerates()}
+        >>> seq.positional_metadata
+          degenerates
+        0       False
+        1       False
+        2       False
+        3       False
+
+        Delete positional metadata:
+
+        >>> seq.has_positional_metadata()
+        True
+        >>> del seq.positional_metadata
+        >>> seq.positional_metadata
+        Empty DataFrame
+        Columns: []
+        Index: [0, 1, 2, 3]
+        >>> seq.has_positional_metadata()
+        False
 
         """
-        return {}
+        if self._positional_metadata is None:
+            # not using setter to avoid copy
+            self._positional_metadata = pd.DataFrame(
+                index=np.arange(len(self)))
+        return self._positional_metadata
+
+    @positional_metadata.setter
+    def positional_metadata(self, positional_metadata):
+        try:
+            # copy=True to copy underlying data buffer
+            positional_metadata = pd.DataFrame(positional_metadata, copy=True)
+        except pd.core.common.PandasError as e:
+            raise TypeError('Positional metadata invalid. Must be consumable '
+                            'by pd.DataFrame. Original pandas error message: '
+                            '"%s"' % e)
+
+        num_rows = len(positional_metadata.index)
+        if num_rows != len(self):
+            raise ValueError(
+                "Number of positional metadata values (%d) must match the "
+                "number of characters in the sequence (%d)." %
+                (num_rows, len(self)))
+
+        positional_metadata.reset_index(drop=True, inplace=True)
+        self._positional_metadata = positional_metadata
+
+    @positional_metadata.deleter
+    def positional_metadata(self):
+        self._positional_metadata = None
+
+    @property
+    def _string(self):
+        return self._bytes.tostring()
+
+    @stable(as_of="0.4.0")
+    def __init__(self, sequence, metadata=None,
+                 positional_metadata=None):
+
+        if isinstance(sequence, np.ndarray):
+            if sequence.dtype == np.uint8:
+                self._set_bytes_contiguous(sequence)
+            elif sequence.dtype == '|S1':
+                sequence = sequence.view(np.uint8)
+                # Guarantee the sequence is an array (might be scalar before
+                # this).
+                if sequence.shape == ():
+                    sequence = np.array([sequence], dtype=np.uint8)
+                self._set_bytes_contiguous(sequence)
+            else:
+                raise TypeError(
+                    "Can only create sequence from numpy.ndarray of dtype "
+                    "np.uint8 or '|S1'. Invalid dtype: %s" %
+                    sequence.dtype)
+        elif isinstance(sequence, Sequence):
+            # we're not simply accessing sequence.metadata in order to avoid
+            # creating "empty" metadata representations on both sequence
+            # objects if they don't have metadata. same strategy is used below
+            # for positional metadata
+            if metadata is None and sequence.has_metadata():
+                metadata = sequence.metadata
+            if (positional_metadata is None and
+                    sequence.has_positional_metadata()):
+                positional_metadata = sequence.positional_metadata
+            sequence = sequence._bytes
+
+            self._owns_bytes = False
+
+            self._set_bytes(sequence)
 
-    def __init__(self, sequence, id="", description="", quality=None,
-                 validate=False):
-        if not isinstance(sequence, string_types):
-            sequence = ''.join(sequence)
-        self._sequence = sequence
+        else:
+            # Python 3 will not raise a UnicodeEncodeError so we force it by
+            # encoding it as ascii
+            if isinstance(sequence, six.text_type):
+                sequence = sequence.encode("ascii")
+            s = np.fromstring(sequence, dtype=np.uint8)
+
+            # There are two possibilities (to our knowledge) at this point:
+            # Either the sequence we were given was something string-like,
+            # (else it would not have made it past fromstring), or it was a
+            # numpy scalar, and so our length must be 1.
+            if isinstance(sequence, np.generic) and len(s) != 1:
+                raise TypeError("Cannot create a sequence with %r" %
+                                type(sequence).__name__)
+
+            sequence = s
+            self._owns_bytes = True
+
+            self._set_bytes(sequence)
+
+        if metadata is None:
+            self._metadata = None
+        else:
+            self.metadata = metadata
 
-        self._id = id
-        self._description = description
-        self._set_quality(quality)
+        if positional_metadata is None:
+            self._positional_metadata = None
+        else:
+            self.positional_metadata = positional_metadata
+
+    def _set_bytes_contiguous(self, sequence):
+        """Munge the sequence data into a numpy array of dtype uint8."""
+        if not sequence.flags['C_CONTIGUOUS']:
+            # numpy doesn't support views of non-contiguous arrays. Since we're
+            # making heavy use of views internally, and users may also supply
+            # us with a view, make sure we *always* store a contiguous array to
+            # avoid hard-to-track bugs. See
+            # https://github.com/numpy/numpy/issues/5716
+            sequence = np.ascontiguousarray(sequence)
+            self._owns_bytes = True
+        else:
+            self._owns_bytes = False
+        self._set_bytes(sequence)
 
-        if validate and not self.is_valid():
-            unsupported_chars = self.unsupported_characters()
-            raise BiologicalSequenceError(
-                "Sequence contains unsupported characters: %s"
-                % (" ".join(unsupported_chars)))
+    def _set_bytes(self, sequence):
+        sequence.flags.writeable = False
+        self._bytes = sequence
 
-    def __contains__(self, other):
-        """The in operator.
+    @stable(as_of="0.4.0")
+    def __contains__(self, subsequence):
+        """Determine if a subsequence is contained in the biological sequence.
 
         Parameters
         ----------
-        other : str
+        subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
             The putative subsequence.
 
         Returns
         -------
         bool
-            Indicates whether `other` is contained in `self`.
+            Indicates whether `subsequence` is contained in the biological
+            sequence.
+
+        Raises
+        ------
+        TypeError
+            If `subsequence` is a ``Sequence`` object with a different type
+            than the biological sequence.
 
         Examples
         --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('GGUCGUGAAGGA')
+        >>> from skbio import Sequence
+        >>> s = Sequence('GGUCGUGAAGGA')
         >>> 'GGU' in s
         True
         >>> 'CCC' in s
         False
 
-        .. shownumpydoc
-
         """
-        return other in self._sequence
+        return self._munge_to_bytestring(subsequence, "in") in self._string
 
+    @stable(as_of="0.4.0")
     def __eq__(self, other):
-        """The equality operator.
+        """Determine if the biological sequence is equal to another.
 
-        Biological sequences are equal if their sequence is the same and they
-        are the same type. Identifier, description, and quality scores
-        **are ignored**.
+        Biological sequences are equal if they are *exactly* the same type and
+        their sequence characters, metadata, and positional metadata are the
+        same.
 
         Parameters
         ----------
-        other : `BiologicalSequence`
-            The sequence to test for equality against.
+        other : Sequence
+            Sequence to test for equality against.
 
         Returns
         -------
         bool
-            Indicates whether `self` and `other` are equal.
+            Indicates whether the biological sequence is equal to `other`.
 
-        See Also
+        Examples
         --------
-        __ne__
-        equals
+        Define two biological sequences that have the same underlying sequence
+        of characters:
 
-        Notes
-        -----
-        See ``BiologicalSequence.equals`` for more fine-grained control of
-        equality testing.
+        >>> from skbio import Sequence
+        >>> s = Sequence('ACGT')
+        >>> t = Sequence('ACGT')
 
-        This method is equivalent to
-        ``self.equals(other, ignore=['id', 'description', 'quality'])``.
+        The two sequences are considered equal because they are the same type,
+        their underlying sequence of characters are the same, and their
+        optional metadata attributes (``metadata`` and ``positional_metadata``)
+        were not provided:
 
-        Examples
-        --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('GGUCGUGAAGGA')
-        >>> t = BiologicalSequence('GGUCGUGAAGGA')
         >>> s == t
         True
-        >>> u = BiologicalSequence('GGUCGUGACCGA')
+        >>> t == s
+        True
+
+        Define another biological sequence with a different sequence of
+        characters than the previous two biological sequences:
+
+        >>> u = Sequence('ACGA')
         >>> u == t
         False
 
-        Note that even though the quality scores do not match between ``u`` and
-        ``v``, they are considered equal:
+        Define a biological sequence with the same sequence of characters as
+        ``u`` but with different metadata and positional metadata:
 
-        >>> v = BiologicalSequence('GGUCGUGACCGA',
-        ...                        quality=[1, 5, 3, 3, 2, 42, 100, 9, 10, 55,
-        ...                                 42, 42])
-        >>> u == v
-        True
+        >>> v = Sequence('ACGA', metadata={'id': 'abc'},
+        ...              positional_metadata={'quality':[1, 5, 3, 3]})
 
-        .. shownumpydoc
+        The two sequences are not considered equal because their metadata and
+        positional metadata do not match:
+
+        >>> u == v
+        False
 
         """
-        return self.equals(other, ignore=['id', 'description', 'quality'])
+        # checks ordered from least to most expensive
+        if self.__class__ != other.__class__:
+            return False
+
+        # we're not simply comparing self.metadata to other.metadata in order
+        # to avoid creating "empty" metadata representations on the sequence
+        # objects if they don't have metadata. same strategy is used below for
+        # positional metadata
+        if self.has_metadata() and other.has_metadata():
+            if self.metadata != other.metadata:
+                return False
+        elif not (self.has_metadata() or other.has_metadata()):
+            # both don't have metadata
+            pass
+        else:
+            # one has metadata while the other does not
+            return False
+
+        if self._string != other._string:
+            return False
+
+        if self.has_positional_metadata() and other.has_positional_metadata():
+            if not self.positional_metadata.equals(other.positional_metadata):
+                return False
+        elif not (self.has_positional_metadata() or
+                  other.has_positional_metadata()):
+            # both don't have positional metadata
+            pass
+        else:
+            # one has positional metadata while the other does not
+            return False
+
+        return True
+
+    @stable(as_of="0.4.0")
+    def __ne__(self, other):
+        """Determine if the biological sequence is not equal to another.
 
-    def __getitem__(self, i):
-        """The indexing operator.
+        Biological sequences are not equal if they are not *exactly* the same
+        type, or their sequence characters, metadata, or positional metadata
+        differ.
 
         Parameters
         ----------
-        i : int, slice, or sequence of ints
-            The position(s) to return from the `BiologicalSequence`. If `i` is
-            a sequence of ints, these are assumed to be indices in the sequence
-            to keep.
+        other : Sequence
+            Sequence to test for inequality against.
 
         Returns
         -------
-        BiologicalSequence
-            New biological sequence containing the character(s) at position(s)
-            `i` in the current `BiologicalSequence`. If quality scores are
-            present, the quality score at position(s) `i` will be included in
-            the returned sequence. ID and description are also included.
+        bool
+            Indicates whether the biological sequence is not equal to `other`.
 
         Examples
         --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('GGUCGUGAAGGA')
-
-        Obtain a single character from the biological sequence:
-
-        >>> s[1]
-        <BiologicalSequence: G (length: 1)>
-
-        Obtain a slice:
-
-        >>> s[7:]
-        <BiologicalSequence: AAGGA (length: 5)>
-
-        Obtain characters at the following indices:
-
-        >>> s[[3, 4, 7, 0, 3]]
-        <BiologicalSequence: CGAGC (length: 5)>
-
-        .. shownumpydoc
+        >>> from skbio import Sequence
+        >>> s = Sequence('ACGT')
+        >>> t = Sequence('ACGT')
+        >>> s != t
+        False
+        >>> u = Sequence('ACGA')
+        >>> u != t
+        True
+        >>> v = Sequence('ACGA', metadata={'id': 'v'})
+        >>> u != v
+        True
 
         """
-        # TODO update this method when #60 is resolved. we have to deal with
-        # discrepancies in indexing rules between str and ndarray... hence the
-        # ugly code
-        try:
-            try:
-                seq = self.sequence[i]
-                qual = self.quality[i] if self.has_quality() else None
-            except TypeError:
-                seq = [self.sequence[idx] for idx in i]
-
-                if self.has_quality():
-                    qual = [self.quality[idx] for idx in i]
-                else:
-                    qual = None
-        except IndexError:
-            raise IndexError(
-                "Position %r is out of range for %r." % (i, self))
+        return not (self == other)
 
-        return self.copy(sequence=seq, quality=qual)
+    @stable(as_of="0.4.0")
+    def __getitem__(self, indexable):
+        """Slice the biological sequence.
 
-    def __hash__(self):
-        """The hash operator.
+        Parameters
+        ----------
+        indexable : int, slice, iterable (int and slice), 1D array_like (bool)
+            The position(s) to return from the biological sequence. If
+            `indexable` is an iterable of integers, these are assumed to be
+            indices in the sequence to keep. If `indexable` is a 1D
+            ``array_like`` of booleans, these are assumed to be the positions
+            in the sequence to keep.
 
         Returns
         -------
-        int
-            The hash of the `BiologicalSequence`.
+        Sequence
+            New biological sequence containing the position(s) specified by
+            `indexable` in the current biological sequence. If positional
+            metadata is present, it will be sliced in the same manner and
+            included in the returned biological sequence. ``metadata`` is
+            also included.
 
         Examples
         --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('GGUCGUGAAGGA')
-        >>> hash(s)
-        -1080059835405276950
+        >>> from skbio import Sequence
+        >>> s = Sequence('GGUCGUGAAGGA')
 
-        .. shownumpydoc
+        Obtain a single character from the biological sequence:
 
-        """
-        return hash(self._sequence)
+        >>> s[1]
+        Sequence
+        -------------
+        Stats:
+            length: 1
+        -------------
+        0 G
 
-    def __iter__(self):
-        """The iter operator.
+        Obtain a slice:
 
-        Returns
-        -------
-        iterator
-            Position iterator for the `BiologicalSequence`.
+        >>> s[7:]
+        Sequence
+        -------------
+        Stats:
+            length: 5
+        -------------
+        0 AAGGA
 
-        Examples
-        --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('GGUC')
-        >>> for c in s: print(c)
-        G
-        G
-        U
-        C
+        Obtain characters at the following indices:
 
-        .. shownumpydoc
+        >>> s[[3, 4, 7, 0, 3]]
+        Sequence
+        -------------
+        Stats:
+            length: 5
+        -------------
+        0 CGAGC
+
+        Obtain characters at positions evaluating to `True`:
+
+        >>> s = Sequence('GGUCG')
+        >>> index = [True, False, True, True, False]
+        >>> s[index]
+        Sequence
+        -------------
+        Stats:
+            length: 3
+        -------------
+        0 GUC
 
         """
-        return iter(self._sequence)
+        if (not isinstance(indexable, np.ndarray) and
+            ((not isinstance(indexable, six.string_types)) and
+             hasattr(indexable, '__iter__'))):
+            indexable_ = indexable
+            indexable = np.asarray(indexable)
+
+            if indexable.dtype == object:
+                indexable = list(indexable_)  # TODO: Don't blow out memory
+
+                if len(indexable) == 0:
+                    # indexing with an empty list, so convert to ndarray and
+                    # fall through to ndarray slicing below
+                    indexable = np.asarray(indexable)
+                else:
+                    seq = np.concatenate(
+                        list(_slices_from_iter(self._bytes, indexable)))
+                    index = _as_slice_if_single_index(indexable)
+
+                    positional_metadata = None
+                    if self.has_positional_metadata():
+                        pos_md_slices = list(_slices_from_iter(
+                                             self.positional_metadata, index))
+                        positional_metadata = pd.concat(pos_md_slices)
+
+                    return self._to(sequence=seq,
+                                    positional_metadata=positional_metadata)
+        elif (isinstance(indexable, six.string_types) or
+                isinstance(indexable, bool)):
+            raise IndexError("Cannot index with %s type: %r" %
+                             (type(indexable).__name__, indexable))
+
+        if (isinstance(indexable, np.ndarray) and
+            indexable.dtype == bool and
+                len(indexable) != len(self)):
+            raise IndexError("A boolean vector index must be the same length"
+                             " as the sequence (%d, not %d)." %
+                             (len(self), len(indexable)))
+
+        if isinstance(indexable, np.ndarray) and indexable.size == 0:
+            # convert an empty ndarray to a supported dtype for slicing a numpy
+            # array
+            indexable = indexable.astype(int)
+
+        seq = self._bytes[indexable]
+        positional_metadata = self._slice_positional_metadata(indexable)
+
+        return self._to(sequence=seq, positional_metadata=positional_metadata)
+
+    def _slice_positional_metadata(self, indexable):
+        if self.has_positional_metadata():
+            if _is_single_index(indexable):
+                index = _single_index_to_slice(indexable)
+            else:
+                index = indexable
+            return self.positional_metadata.iloc[index]
+        else:
+            return None
 
+    @stable(as_of="0.4.0")
     def __len__(self):
-        """The len operator.
+        """Return the number of characters in the biological sequence.
 
         Returns
         -------
         int
-            The length of the `BiologicalSequence`.
+            The length of the biological sequence.
 
         Examples
         --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('GGUC')
+        >>> from skbio import Sequence
+        >>> s = Sequence('GGUC')
         >>> len(s)
         4
 
-        .. shownumpydoc
-
         """
-        return len(self._sequence)
-
-    def __ne__(self, other):
-        """The inequality operator.
-
-        Biological sequences are not equal if their sequence is different or
-        they are not the same type. Identifier, description, and quality scores
-        **are ignored**.
+        return self._bytes.size
 
-        Parameters
-        ----------
-        other : `BiologicalSequence`
-            The sequence to test for inequality against.
+    @stable(as_of="0.4.0")
+    def __nonzero__(self):
+        """Return the truth value (truthiness) of the sequence.
 
         Returns
         -------
         bool
-            Indicates whether `self` and `other` are not equal.
-
-        See Also
-        --------
-        __eq__
-        equals
-
-        Notes
-        -----
-        See ``BiologicalSequence.equals`` for more fine-grained control of
-        equality testing.
+            True if length of sequence is greater than 0, else False.
 
         Examples
         --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('GGUCGUGAAGGA')
-        >>> t = BiologicalSequence('GGUCGUGAAGGA')
-        >>> s != t
+        >>> from skbio import Sequence
+        >>> bool(Sequence(''))
         False
-        >>> u = BiologicalSequence('GGUCGUGACCGA')
-        >>> u != t
+        >>> bool(Sequence('ACGT'))
         True
 
-        .. shownumpydoc
-
         """
-        return not (self == other)
+        return len(self) > 0
 
-    def __repr__(self):
-        """The repr method.
-
-        Returns
-        -------
-        str
-            Returns a string representation of the object.
+    @stable(as_of="0.4.0")
+    def __iter__(self):
+        """Iterate over positions in the biological sequence.
 
-        Notes
-        -----
-        String representation contains the class name, the first ten characters
-        of the sequence followed by ellipses (or the full sequence
-        and no ellipses, if the sequence is less than 11 characters long),
-        followed by the sequence length.
+        Yields
+        ------
+        Sequence
+            Single character subsequence, one for each position in the
+            sequence.
 
         Examples
         --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('GGUCGUGAAGGA')
-        >>> repr(s)
-        '<BiologicalSequence: GGUCGUGAAG... (length: 12)>'
-        >>> t = BiologicalSequence('ACGT')
-        >>> repr(t)
-        '<BiologicalSequence: ACGT (length: 4)>'
-        >>> t
-        <BiologicalSequence: ACGT (length: 4)>
-
-        .. shownumpydoc
+        >>> from skbio import Sequence
+        >>> s = Sequence('GGUC')
+        >>> for c in s:
+        ...     str(c)
+        'G'
+        'G'
+        'U'
+        'C'
 
         """
-        first_ten = self.sequence[:10]
-        cn = self.__class__.__name__
-        length = len(self)
-        if length > 10:
-            ellipses = "..."
-        else:
-            ellipses = ""
-        return '<%s: %s%s (length: %d)>' % (cn, first_ten, ellipses, length)
+        for i in range(len(self)):
+            yield self[i]
 
+    @stable(as_of="0.4.0")
     def __reversed__(self):
-        """The reversed operator.
+        """Iterate over positions in the biological sequence in reverse order.
 
-        Returns
-        -------
-        iterator
-            Reverse position iterator for the `BiologicalSequence`.
+        Yields
+        ------
+        Sequence
+            Single character subsequence, one for each position in the
+            sequence.
 
         Examples
         --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('GGUC')
-        >>> for c in reversed(s): print(c)
-        C
-        U
-        G
-        G
-
-        .. shownumpydoc
+        >>> from skbio import Sequence
+        >>> s = Sequence('GGUC')
+        >>> for c in reversed(s):
+        ...     str(c)
+        'C'
+        'U'
+        'G'
+        'G'
 
         """
-        return reversed(self._sequence)
+        return iter(self[::-1])
 
+    @stable(as_of="0.4.0")
     def __str__(self):
-        """The str operator
+        """Return biological sequence characters as a string.
 
         Returns
         -------
         str
-            String representation of the `BiologicalSequence`. This will be the
-            full sequence, but will not contain information about the type,
-            identifier, description, or quality scores.
+            Sequence characters as a string. No metadata or positional
+            metadata will be included.
 
         See Also
         --------
-        to_fasta
-        id
-        description
-        __repr__
+        sequence
 
         Examples
         --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('GGUC')
+        >>> from skbio import Sequence
+        >>> s = Sequence('GGUCGUAAAGGA', metadata={'id':'hello'})
         >>> str(s)
-        'GGUC'
-        >>> print(s)
-        GGUC
-
-        .. shownumpydoc
+        'GGUCGUAAAGGA'
 
         """
-        return self.sequence
+        return str(self._string.decode("ascii"))
 
-    @property
-    def sequence(self):
-        """String containing underlying biological sequence characters.
+    @stable(as_of="0.4.0")
+    def __repr__(self):
+        r"""Return a string representation of the biological sequence object.
+
+        Representation includes:
+
+        * sequence type
+        * metadata keys and values: will display key/value if it is an
+          understood type, otherwise just the type will be displayed. If it is
+          an understood type whose representation is too long, just the type
+          will be displayed
+        * positional metadata: column names and column dtypes will be displayed
+          in the order they appear in the positional metadata ``pd.DataFrame``.
+          Column names (i.e., keys) follow the same display rules as metadata
+          keys
+        * sequence stats (e.g., length)
+        * up to five lines of chunked sequence data. Each line of chunked
+          sequence data displays the current position in the sequence
 
-        A string representing the characters of the biological sequence.
+        Returns
+        -------
+        str
+            String representation of the biological sequence object.
 
         Notes
         -----
-        This property is not writeable.
+        Subclasses can override Sequence._repr_stats to provide custom
+        statistics.
+
+        Examples
+        --------
+        Short sequence without metadata:
+
+        >>> from skbio import Sequence
+        >>> Sequence('ACGTAATGGATACGTAATGCA')
+        Sequence
+        -------------------------
+        Stats:
+            length: 21
+        -------------------------
+        0 ACGTAATGGA TACGTAATGC A
+
+        Longer sequence displays first two lines and last two lines:
+
+        >>> Sequence('ACGT' * 100)
+        Sequence
+        ---------------------------------------------------------------------
+        Stats:
+            length: 400
+        ---------------------------------------------------------------------
+        0   ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
+        60  ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
+        ...
+        300 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
+        360 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
+
+        Sequence with metadata and positional metadata:
+
+        >>> metadata = {
+        ...     'id': 'seq-id',
+        ...     'description': 'description of the sequence, wrapping across '
+        ...     'lines if it\'s too long',
+        ...     'authors': ['Alice', 'Bob', 'Carol'],
+        ...     'year': 2015,
+        ...     'published': True
+        ... }
+        >>> positional_metadata = {
+        ...     'quality': [3, 10, 11, 10],
+        ...     'exons': [True, True, False, True]
+        ... }
+        >>> Sequence('ACGT', metadata=metadata,
+        ...          positional_metadata=positional_metadata)
+        Sequence
+        ----------------------------------------------------------------------
+        Metadata:
+            'authors': <type 'list'>
+            'description': "description of the sequence, wrapping across lines
+                            if it's too long"
+            'id': 'seq-id'
+            'published': True
+            'year': 2015
+        Positional metadata:
+            'exons': <dtype: bool>
+            'quality': <dtype: int64>
+        Stats:
+            length: 4
+        ----------------------------------------------------------------------
+        0 ACGT
 
         """
-        return self._sequence
+        return _SequenceReprBuilder(
+            seq=self,
+            width=71,  # 79 for pep8, 8 space indent for docstrings
+            indent=4,
+            chunk_size=10).build()
 
-    @property
-    def id(self):
-        """ID of the biological sequence.
+    def _repr_stats(self):
+        """Define statistics to display in the sequence's repr.
 
-        A string representing the identifier (ID) of the biological sequence.
+        Subclasses can override this method to provide type-specific
+        statistics.
 
-        Notes
-        -----
-        This property is not writeable.
+        This method computes a single statistic: length.
+
+        Returns
+        -------
+        list
+            List of tuples where each tuple represents a statistic. Each tuple
+            contains exactly two ``str`` elements: the statistic's name/label,
+            and the str-formatted value of the statistic. Ordering of
+            statistics (i.e., list order) determines display order in the
+            sequence repr.
 
         """
-        return self._id
+        return [('length', '%d' % len(self))]
 
-    @property
-    def description(self):
-        """Description of the biological sequence.
+    @stable(as_of="0.4.0")
+    def __copy__(self):
+        """Return a shallow copy of the biological sequence.
 
-        A string representing the description of the biological sequence.
+        See Also
+        --------
+        copy
 
         Notes
         -----
-        This property is not writeable.
+        This method is equivalent to ``seq.copy(deep=False)``.
 
         """
-        return self._description
+        return self.copy(deep=False)
 
-    @property
-    def quality(self):
-        """Quality scores of the characters in the biological sequence.
+    @stable(as_of="0.4.0")
+    def __deepcopy__(self, memo):
+        """Return a deep copy of the biological sequence.
 
-        A 1-D ``numpy.ndarray`` of nonnegative integers representing Phred
-        quality scores for each character in the biological sequence, or
-        ``None`` if quality scores are not present.
+        See Also
+        --------
+        copy
 
         Notes
         -----
-        This property is not writeable. A copy of the array is *not* returned.
-        The array is read-only (i.e., its ``WRITEABLE`` flag is set to
-        ``False``).
+        This method is equivalent to ``seq.copy(deep=True)``.
 
         """
-        return self._quality
+        return self._copy(True, memo)
 
-    def has_quality(self):
-        """Return bool indicating presence of quality scores in the sequence.
+    @stable(as_of="0.4.0")
+    def has_metadata(self):
+        """Determine if the sequence contains metadata.
 
         Returns
         -------
         bool
-            ``True`` if the biological sequence has quality scores, ``False``
-            otherwise.
+            Indicates whether the sequence has metadata
 
-        See Also
+        Examples
         --------
-        quality
+        >>> from skbio import DNA
+        >>> s = DNA('ACACGACGTT')
+        >>> s.has_metadata()
+        False
+        >>> t = DNA('ACACGACGTT', metadata={'id': 'seq-id'})
+        >>> t.has_metadata()
+        True
 
         """
-        return self.quality is not None
+        return self._metadata is not None and bool(self.metadata)
 
-    def copy(self, **kwargs):
-        """Return a copy of the current biological sequence.
+    @stable(as_of="0.4.0")
+    def has_positional_metadata(self):
+        """Determine if the sequence contains positional metadata.
 
-        Returns a copy of the current biological sequence, optionally with
-        updated attributes specified as keyword arguments.
+        Returns
+        -------
+        bool
+            Indicates whether the sequence has positional metadata
+
+        Examples
+        --------
+        >>> from skbio import DNA
+        >>> s = DNA('ACACGACGTT')
+        >>> s.has_positional_metadata()
+        False
+        >>> t = DNA('ACACGACGTT', positional_metadata={'quality': range(10)})
+        >>> t.has_positional_metadata()
+        True
+
+        """
+        return (self._positional_metadata is not None and
+                len(self.positional_metadata.columns) > 0)
+
+    @stable(as_of="0.4.0")
+    def copy(self, deep=False):
+        """Return a copy of the biological sequence.
 
         Parameters
         ----------
-        kwargs : dict, optional
-            Keyword arguments passed to the ``BiologicalSequence`` (or
-            subclass) constructor. The returned copy will have its attributes
-            updated based on the values in `kwargs`. If an attribute is
-            missing, the copy will keep the same attribute as the current
-            biological sequence. Valid attribute names are `'sequence'`,
-            `'id'`, `'description'`, and `'quality'`. Default behavior is to
-            return a copy of the current biological sequence without changing
-            any attributes.
+        deep : bool, optional
+            Perform a deep copy. If ``False``, perform a shallow copy.
 
         Returns
         -------
-        BiologicalSequence
-            Copy of the current biological sequence, optionally with updated
-            attributes based on `kwargs`. Will be the same type as the current
-            biological sequence (`self`).
+        Sequence
+            Copy of the biological sequence.
 
         Notes
         -----
-        This is a shallow copy, but since biological sequences are immutable,
-        it is conceptually the same as a deep copy.
-
-        This method is the preferred way of creating new instances from an
-        existing biological sequence, instead of calling
-        ``self.__class__(...)``, as the latter can be error-prone (e.g.,
-        forgetting to propagate attributes to the new instance).
+        Since sequence objects can share the same underlying immutable sequence
+        data (or pieces of it), this method can be used to create a sequence
+        object with its own copy of the sequence data so that the original
+        sequence data can be garbage-collected.
 
         Examples
         --------
-        Create a biological sequence:
-
-        >>> from skbio import BiologicalSequence
-        >>> seq = BiologicalSequence('AACCGGTT', id='id1',
-        ...                          description='biological sequence',
-        ...                          quality=[4, 2, 22, 23, 1, 1, 1, 9])
-
-        Create a copy of ``seq``, keeping the same underlying sequence of
-        characters and quality scores, while updating ID and description:
+        Create a sequence:
 
-        >>> new_seq = seq.copy(id='new-id', description='new description')
+        >>> from pprint import pprint
+        >>> from skbio import Sequence
+        >>> seq = Sequence('ACGT',
+        ...                metadata={'id': 'seq-id', 'authors': ['Alice']},
+        ...                positional_metadata={'quality': [7, 10, 8, 5],
+        ...                                     'list': [[], [], [], []]})
 
-        Note that the copied biological sequence's underlying sequence and
-        quality scores are the same as ``seq``:
+        Make a shallow copy of the sequence:
 
-        >>> new_seq.sequence
-        'AACCGGTT'
-        >>> new_seq.quality
-        array([ 4,  2, 22, 23,  1,  1,  1,  9])
+        >>> seq_copy = seq.copy()
+        >>> seq_copy == seq
+        True
 
-        The ID and description have been updated:
+        Setting new references in the copied sequence's metadata doesn't affect
+        the original sequence's metadata:
+
+        >>> seq_copy.metadata['id'] = 'new-id'
+        >>> pprint(seq_copy.metadata)
+        {'authors': ['Alice'], 'id': 'new-id'}
+        >>> pprint(seq.metadata)
+        {'authors': ['Alice'], 'id': 'seq-id'}
+
+        The same applies to the sequence's positional metadata:
+
+        >>> seq_copy.positional_metadata.loc[0, 'quality'] = 999
+        >>> seq_copy.positional_metadata
+          list  quality
+        0   []      999
+        1   []       10
+        2   []        8
+        3   []        5
+        >>> seq.positional_metadata
+          list  quality
+        0   []        7
+        1   []       10
+        2   []        8
+        3   []        5
+
+        Since only a *shallow* copy was made, updates to mutable objects stored
+        as metadata affect the original sequence's metadata:
+
+        >>> seq_copy.metadata['authors'].append('Bob')
+        >>> pprint(seq_copy.metadata)
+        {'authors': ['Alice', 'Bob'], 'id': 'new-id'}
+        >>> pprint(seq.metadata)
+        {'authors': ['Alice', 'Bob'], 'id': 'seq-id'}
+
+        The same applies to the sequence's positional metadata:
+
+        >>> seq_copy.positional_metadata.loc[0, 'list'].append(1)
+        >>> seq_copy.positional_metadata
+          list  quality
+        0  [1]      999
+        1   []       10
+        2   []        8
+        3   []        5
+        >>> seq.positional_metadata
+          list  quality
+        0  [1]        7
+        1   []       10
+        2   []        8
+        3   []        5
+
+        Perform a deep copy to avoid this behavior:
+
+        >>> seq_deep_copy = seq.copy(deep=True)
+
+        Updates to mutable objects no longer affect the original sequence's
+        metadata:
+
+        >>> seq_deep_copy.metadata['authors'].append('Carol')
+        >>> pprint(seq_deep_copy.metadata)
+        {'authors': ['Alice', 'Bob', 'Carol'], 'id': 'seq-id'}
+        >>> pprint(seq.metadata)
+        {'authors': ['Alice', 'Bob'], 'id': 'seq-id'}
+
+        Nor its positional metadata:
+
+        >>> seq_deep_copy.positional_metadata.loc[0, 'list'].append(2)
+        >>> seq_deep_copy.positional_metadata
+             list  quality
+        0  [1, 2]        7
+        1      []       10
+        2      []        8
+        3      []        5
+        >>> seq.positional_metadata
+          list  quality
+        0  [1]        7
+        1   []       10
+        2   []        8
+        3   []        5
 
-        >>> new_seq.id
-        'new-id'
-        >>> new_seq.description
-        'new description'
+        """
+        return self._copy(deep, {})
+
+    def _copy(self, deep, memo):
+        # strategy: copy the sequence without metadata first, then set metadata
+        # attributes with copies. we take this approach instead of simply
+        # passing the metadata through the Sequence constructor because we
+        # don't want to copy twice (this could happen when deep=True, where we
+        # deep copy here and then shallow copy in the Sequence constructor). we
+        # also directly set the private metadata attributes instead of using
+        # their public setters to avoid an unnecessary copy
+
+        # we don't make a distinction between deep vs. shallow copy of bytes
+        # because dtype=np.uint8. we only need to make the distinction when
+        # dealing with object dtype
+        bytes = np.copy(self._bytes)
+
+        seq_copy = self._constructor(sequence=bytes, metadata=None,
+                                     positional_metadata=None)
+
+        if self.has_metadata():
+            metadata = self.metadata
+            if deep:
+                metadata = copy.deepcopy(metadata, memo)
+            else:
+                metadata = metadata.copy()
+            seq_copy._metadata = metadata
 
-        The original biological sequence's ID and description have not been
-        changed:
+        if self.has_positional_metadata():
+            positional_metadata = self.positional_metadata
+            if deep:
+                positional_metadata = copy.deepcopy(positional_metadata, memo)
+            else:
+                # pandas' copy(deep=True) copies the data buffer but does not
+                # recurse into contained objects -- i.e., shallow-copy semantics
+                positional_metadata = positional_metadata.copy(deep=True)
+            seq_copy._positional_metadata = positional_metadata
 
-        >>> seq.id
-        'id1'
-        >>> seq.description
-        'biological sequence'
+        return seq_copy
 
-        """
-        defaults = {
-            'sequence': self.sequence,
-            'id': self.id,
-            'description': self.description,
-            'quality': self.quality
-        }
-        defaults.update(kwargs)
-        return self.__class__(**defaults)
-
-    def equals(self, other, ignore=None):
-        """Compare two biological sequences for equality.
-
-        By default, biological sequences are equal if their sequence,
-        identifier, description, and quality scores are the same and they are
-        the same type.
+    @stable(as_of="0.4.0")
+    def count(self, subsequence, start=None, end=None):
+        """Count occurrences of a subsequence in the biological sequence.
 
         Parameters
         ----------
-        other : BiologicalSequence
-            The sequence to test for equality against.
-        ignore : iterable of str, optional
-            List of features to ignore in the equality test. By default, all
-            features must be the same for two biological sequences to be
-            considered equal. Features that can be ignored are ``'type'``,
-            ``'id'``, ``'description'``, ``'quality'``, and ``'sequence'``.
+        subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
+            Subsequence to count occurrences of.
+        start : int, optional
+            The position at which to start counting (inclusive).
+        end : int, optional
+            The position at which to stop counting (exclusive).
 
         Returns
         -------
-        bool
-            Indicates whether `self` and `other` are equal.
+        int
+            Number of occurrences of `subsequence` in the biological sequence.
 
-        See Also
-        --------
-        __eq__
-        __ne__
+        Raises
+        ------
+        ValueError
+            If `subsequence` is of length 0.
+        TypeError
+            If `subsequence` is a ``Sequence`` object with a different type
+            than the biological sequence.
 
         Examples
         --------
-        Define two biological sequences that have the same underlying sequence
-        of characters:
-
-        >>> from skbio import BiologicalSequence
-        >>> s = BiologicalSequence('GGUCGUGAAGGA')
-        >>> t = BiologicalSequence('GGUCGUGAAGGA')
-
-        The two sequences are considered equal because they are the same type,
-        their underlying sequence of characters are the same, and their
-        optional attributes (id, description, and quality scores) were not
-        provided:
+        >>> from skbio import Sequence
+        >>> s = Sequence('GGUCG')
+        >>> s.count('G')
+        3
+        >>> s.count('GG')
+        1
+        >>> s.count('T')
+        0
+        >>> s.count('G', 2, 5)
+        1
 
-        >>> s.equals(t)
-        True
-        >>> t.equals(s)
-        True
+        """
+        if len(subsequence) == 0:
+            raise ValueError("`count` is not defined for empty subsequences.")
 
-        Define another biological sequence with a different sequence of
-        characters than the previous two biological sequences:
+        return self._string.count(
+            self._munge_to_bytestring(subsequence, "count"), start, end)
 
-        >>> u = BiologicalSequence('GGUCGUGACCGA')
-        >>> u.equals(t)
-        False
+    @stable(as_of="0.4.0")
+    def index(self, subsequence, start=None, end=None):
+        """Find position where subsequence first occurs in the sequence.
 
-        Define a biological sequence with the same sequence of characters as
-        ``u``, but with different identifier and quality scores:
-        >>> v = BiologicalSequence('GGUCGUGACCGA', id='abc',
-        ...                        quality=[1, 5, 3, 3, 2, 42, 100, 9, 10, 55,
-        ...                                 42, 42])
+        Parameters
+        ----------
+        subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
+            Subsequence to search for in the biological sequence.
+        start : int, optional
+            The position at which to start searching (inclusive).
+        end : int, optional
+            The position at which to stop searching (exclusive).
 
-        By default, the two sequences are *not* considered equal because their
-        identifiers and quality scores do not match:
+        Returns
+        -------
+        int
+            Position where `subsequence` first occurs in the biological
+            sequence.
 
-        >>> u.equals(v)
-        False
+        Raises
+        ------
+        ValueError
+            If `subsequence` is not present in the biological sequence.
+        TypeError
+            If `subsequence` is a ``Sequence`` object with a different type
+            than the biological sequence.
 
-        By specifying that the quality scores and identifier should be ignored,
-        they now compare equal:
-
-        >>> u.equals(v, ignore=['quality', 'id'])
-        True
-
-        """
-        if ignore is None:
-            ignore = {}
-
-        # Checks are ordered from least to most expensive.
-        if 'type' not in ignore and self.__class__ != other.__class__:
-            return False
-
-        if 'id' not in ignore and self.id != other.id:
-            return False
-
-        if 'description' not in ignore and \
-                self.description != other.description:
-            return False
-
-        # Use array_equal instead of (a == b).all() because of this issue:
-        #     http://stackoverflow.com/a/10582030
-        if 'quality' not in ignore and not np.array_equal(self.quality,
-                                                          other.quality):
-            return False
-
-        if 'sequence' not in ignore and self.sequence != other.sequence:
-            return False
-
-        return True
-
-    def count(self, subsequence):
-        """Returns the number of occurences of subsequence.
-
-        Parameters
-        ----------
-        subsequence : str
-            The subsequence to count occurences of.
-
-        Returns
-        -------
-        int
-            The number of occurrences of substring in the `BiologicalSequence`.
-
-        Examples
-        --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('GGUC')
-        >>> s.count('G')
-        2
-
-        """
-        return self._sequence.count(subsequence)
-
-    def degap(self):
-        """Returns a new `BiologicalSequence` with gap characters removed.
-
-        Returns
-        -------
-        BiologicalSequence
-            A new `BiologicalSequence` with all characters from
-            `self.gap_alphabet` filtered from the sequence.
-
-        Notes
-        -----
-        The type, id, and description of the result will be the
-        same as `self`. If quality scores are present, they will be filtered in
-        the same manner as the sequence and included in the resulting
-        degapped biological sequence.
-
-        Examples
-        --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('GGUC-C--ACGTT-C.', quality=range(16))
-        >>> t = s.degap()
-        >>> t
-        <BiologicalSequence: GGUCCACGTT... (length: 11)>
-        >>> print(t)
-        GGUCCACGTTC
-        >>> t.quality
-        array([ 0,  1,  2,  3,  5,  8,  9, 10, 11, 12, 14])
+        Examples
+        --------
+        >>> from skbio import Sequence
+        >>> s = Sequence('ACACGACGTT-')
+        >>> s.index('ACG')
+        2
 
         """
-        gaps = self.gap_alphabet()
-        indices = [i for i, e in enumerate(self) if e not in gaps]
-        return self[indices]
+        try:
+            return self._string.index(
+                self._munge_to_bytestring(subsequence, "index"), start, end)
+        except ValueError:
+            raise ValueError(
+                "%r is not present in %r." % (subsequence, self))
 
-    def distance(self, other, distance_fn=None):
-        """Returns the distance to other
+    @experimental(as_of="0.4.0")
+    def distance(self, other, metric=None):
+        """Compute the distance to another sequence.
 
         Parameters
         ----------
-        other : `BiologicalSequence`
-            The `BiologicalSequence` to compute the distance to.
-        distance_fn : function, optional
-            Function used to compute the distance between `self` and `other`.
-            If ``None`` (the default), `scipy.spatial.distance.hamming` will be
-            used.
+        other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
+            Sequence to compute the distance to.
+        metric : function, optional
+            Function used to compute the distance between the biological
+            sequence and `other`. If ``None`` (the default),
+            ``scipy.spatial.distance.hamming`` will be used. This function
+            should take two ``skbio.Sequence`` objects and return a ``float``.
 
         Returns
         -------
         float
-            The distance between `self` and `other`.
+            Distance between the biological sequence and `other`.
 
         Raises
         ------
-        skbio.sequence.BiologicalSequenceError
-            If ``len(self) != len(other)`` and ``distance_fn`` ==
-            ``scipy.spatial.distance.hamming``.
+        ValueError
+            If the sequences are not the same length when `metric` is ``None``
+            (i.e., `metric` is ``scipy.spatial.distance.hamming``). This is
+            only checked when using this metric, as equal length is not a
+            requirement of all sequence distance metrics. In general, the
+            metric itself should test and give an informative error message,
+            but the message from ``scipy.spatial.distance.hamming`` is somewhat
+            cryptic (as of this writing), and it's the default metric, so we
+            explicitly do this check here. This metric-specific check will be
+            removed from this method when the ``skbio.sequence.stats`` module
+            is created (track progress on issue #913).
+        TypeError
+            If `other` is a ``Sequence`` object with a different type than the
+            biological sequence.
 
         See Also
         --------
         fraction_diff
         fraction_same
-        skbio.DistanceMatrix
         scipy.spatial.distance.hamming
 
         Examples
         --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('GGUC')
-        >>> t = BiologicalSequence('AGUC')
+        >>> from skbio import Sequence
+        >>> s = Sequence('GGUC')
+        >>> t = Sequence('AGUC')
         >>> s.distance(t)
         0.25
-        >>> def dumb_dist(s1, s2): return 0.42
-        >>> s.distance(t, dumb_dist)
+        >>> def custom_dist(s1, s2): return 0.42
+        >>> s.distance(t, custom_dist)
         0.42
 
         """
-        if distance_fn is None:
-            distance_fn = hamming
-            if len(self) != len(other):
-                raise BiologicalSequenceError(
-                    "Hamming distance can only be computed between "
-                    "BiologicalSequences of equal length.")
-        return distance_fn(self, other)
+        # TODO refactor this method to accept a name (string) of the distance
+        # metric to apply and accept **kwargs
+        other = self._munge_to_sequence(other, 'distance')
+        if metric is None:
+            return self._hamming(other)
+        return float(metric(self, other))
+
+    def _hamming(self, other):
+        # Hamming requires equal length sequences. We are checking this
+        # here because the error you would get otherwise is cryptic.
+        if len(self) != len(other):
+            raise ValueError(
+                "Sequences do not have equal length. "
+                "Hamming distances can only be computed between "
+                "sequences of equal length.")
+        return float(hamming(self.values, other.values))
 
-    def fraction_diff(self, other):
-        """Return fraction of positions that differ relative to `other`
+    @stable(as_of="0.4.0")
+    def matches(self, other):
+        """Find positions that match with another sequence.
 
         Parameters
         ----------
-        other : `BiologicalSequence`
-            The `BiologicalSequence` to compare against.
+        other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
+            Sequence to compare to.
 
         Returns
         -------
-        float
-            The fraction of positions that differ between `self` and `other`.
+        1D np.ndarray (bool)
+            Boolean vector where ``True`` at position ``i`` indicates a match
+            between the sequences at their positions ``i``.
 
         Raises
         ------
-        skbio.sequence.BiologicalSequenceError
-            If ``len(self) != len(other)``.
+        ValueError
+            If the sequences are not the same length.
+        TypeError
+            If `other` is a ``Sequence`` object with a different type than the
+            biological sequence.
 
         See Also
         --------
-        distance
-        fraction_same
-        scipy.spatial.distance.hamming
-
-        Notes
-        -----
-        Computed as the Hamming distance between `self` and `other`. This is
-        available in addition to `distance` in case the `distance` method is
-        updated to use something other than ``scipy.spatial.distance.hamming``
-        as the default distance metric. So, if you specifically want the
-        fraction of positions that differ, you should use this function instead
-        of `distance` to ensure backward compatibility.
+        mismatches
 
         Examples
         --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('GGUC')
-        >>> t = BiologicalSequence('AGUC')
-        >>> s.fraction_diff(t)
-        0.25
+        >>> from skbio import Sequence
+        >>> s = Sequence('GGUC')
+        >>> t = Sequence('GAUU')
+        >>> s.matches(t)
+        array([ True, False,  True, False], dtype=bool)
 
         """
-        return self.distance(other, distance_fn=hamming)
+        other = self._munge_to_sequence(other, 'matches/mismatches')
+        if len(self) != len(other):
+            raise ValueError("Match and mismatch vectors can only be "
+                             "generated from equal length sequences.")
+        return self._bytes == other._bytes
 
-    def fraction_same(self, other):
-        """Return fraction of positions that are the same relative to `other`
+    @stable(as_of="0.4.0")
+    def mismatches(self, other):
+        """Find positions that do not match with another sequence.
 
         Parameters
         ----------
-        other : `BiologicalSequence`
-            The `BiologicalSequence` to compare against.
+        other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
+            Sequence to compare to.
 
         Returns
         -------
-        float
-            The fraction of positions that are the same between `self` and
-            `other`.
+        1D np.ndarray (bool)
+            Boolean vector where ``True`` at position ``i`` indicates a
+            mismatch between the sequences at their positions ``i``.
 
         Raises
         ------
-        skbio.sequence.BiologicalSequenceError
-            If ``len(self) != len(other)``.
-
-        See Also
-        --------
-        distance
-        fraction_diff
-        scipy.spatial.distance.hamming
-
-        Examples
-        --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('GGUC')
-        >>> t = BiologicalSequence('AGUC')
-        >>> s.fraction_same(t)
-        0.75
-
-        """
-        return 1. - self.fraction_diff(other)
-
-    def gap_maps(self):
-        """Return tuples mapping b/w gapped and ungapped positions
-
-        Returns
-        -------
-        tuple containing two lists
-            The first list is the length of the ungapped sequence, and each
-            entry is the position of that base in the gapped sequence. The
-            second list is the length of the gapped sequence, and each entry is
-            either None (if that position represents a gap) or the position of
-            that base in the ungapped sequence.
+        ValueError
+            If the sequences are not the same length.
+        TypeError
+            If `other` is a ``Sequence`` object with a different type than the
+            biological sequence.
 
         See Also
         --------
-        gap_vector
-
-        Notes
-        -----
-        Visual aid is useful here. Imagine we have
-        ``BiologicalSequence('-ACCGA-TA-')``. The position numbers in the
-        ungapped sequence and gapped sequence will be as follows::
-
-              0123456
-              ACCGATA
-              |||||\\
-             -ACCGA-TA-
-             0123456789
-
-        So, in the first list, position 0 maps to position 1, position 1
-        maps to position 2, position 5 maps to position 7, ... And, in the
-        second list, position 0 doesn't map to anything (so it's None),
-        position 1 maps to position 0, ...
+        matches
 
         Examples
         --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('-ACCGA-TA-')
-        >>> m = s.gap_maps()
-        >>> m[0]
-        [1, 2, 3, 4, 5, 7, 8]
-        >>> m[1]
-        [None, 0, 1, 2, 3, 4, None, 5, 6, None]
+        >>> from skbio import Sequence
+        >>> s = Sequence('GGUC')
+        >>> t = Sequence('GAUU')
+        >>> s.mismatches(t)
+        array([False,  True, False,  True], dtype=bool)
 
         """
-        degapped_to_gapped = []
-        gapped_to_degapped = []
-        non_gap_count = 0
-        for i, e in enumerate(self):
-            if self.is_gap(e):
-                gapped_to_degapped.append(None)
-            else:
-                gapped_to_degapped.append(non_gap_count)
-                degapped_to_gapped.append(i)
-                non_gap_count += 1
-        return degapped_to_gapped, gapped_to_degapped
-
-    def gap_vector(self):
-        """Return list indicating positions containing gaps
+        return np.invert(self.matches(other))
 
-        Returns
-        -------
-        list of booleans
-            The list will be of length ``len(self)``, and a position will
-            contain ``True`` if the character at that position in the
-            `BiologicalSequence` is in `self.gap_alphabet`, and ``False``
-            otherwise.
+    @stable(as_of="0.4.0")
+    def match_frequency(self, other, relative=False):
+        """Return count of positions that are the same between two sequences.
 
-        See Also
-        --------
-        gap_maps
-
-        Examples
-        --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('..ACG--TT-')
-        >>> s.gap_vector()
-        [True, True, False, False, False, True, True, False, False, True]
-
-        """
-        return [self.is_gap(c) for c in self._sequence]
-
-    def unsupported_characters(self):
-        """Return the set of unsupported characters in the `BiologicalSequence`
+        Parameters
+        ----------
+        other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
+            Sequence to compare to.
+        relative : bool, optional
+            If ``True``, return the relative frequency of matches instead of
+            the count.
 
         Returns
         -------
-        set
-            Invalid characters in the `BiologicalSequence` (i.e., the
-            characters that are present in the `BiologicalSequence` but which
-            are not in `BiologicalSequence.alphabet` or
-            `BiologicalSequence.gap_alphabet`.
-
-        See Also
-        --------
-        is_valid
-        alphabet
-        gap_alphabet
-        has_unsupported_characters
+        int or float
+            Number of positions that are the same between the sequences. This
+            will be an ``int`` if `relative` is ``False`` and a ``float``
+            if `relative` is ``True``.
 
-        """
-        return set(self) - self.alphabet() - self.gap_alphabet()
-
-    def has_unsupported_characters(self):
-        """Return bool indicating presence/absence of unsupported characters
-
-        Returns
-        -------
-        bool
-            ``True`` if invalid characters are present in the
-            `BiologicalSequence` (i.e., characters which are not in
-            `BiologicalSequence.alphabet` or
-            `BiologicalSequence.gap_alphabet`) and ``False`` otherwise.
+        Raises
+        ------
+        ValueError
+            If the sequences are not the same length.
+        TypeError
+            If `other` is a ``Sequence`` object with a different type than the
+            biological sequence.
 
         See Also
         --------
-        is_valid
-        alphabet
-        gap_alphabet
-        has_unsupported_characters
-
-        """
-        all_supported = self.alphabet() | self.gap_alphabet()
-        for e in self:
-            if e not in all_supported:
-                return True
-        return False
-
-    def index(self, subsequence):
-        """Return the position where subsequence first occurs
-
-        Returns
-        -------
-        int
-            The position where `subsequence` first occurs in the
-            `BiologicalSequence`.
+        mismatch_frequency
+        matches
+        mismatches
+        distance
 
         Examples
         --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('ACACGACGTT-')
-        >>> s.index('ACG')
-        2
+        >>> from skbio import Sequence
+        >>> s = Sequence('GGUC')
+        >>> t = Sequence('AGUC')
+        >>> s.match_frequency(t)
+        3
+        >>> s.match_frequency(t, relative=True)
+        0.75
 
         """
-        try:
-            return self._sequence.index(subsequence)
-        except ValueError:
-            raise ValueError(
-                "%s is not present in %r." % (subsequence, self))
+        if relative:
+            return float(self.matches(other).mean())
+        else:
+            return int(self.matches(other).sum())
 
-    @classmethod
-    def is_gap(cls, char):
-        """Return True if `char` is in the `gap_alphabet` set
+    @stable(as_of="0.4.0")
+    def mismatch_frequency(self, other, relative=False):
+        """Return count of positions that differ between two sequences.
 
         Parameters
         ----------
-        char : str
-            The string to check for presence in the `BiologicalSequence`
-            `gap_alphabet`.
+        other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
+            Sequence to compare to.
+        relative : bool, optional
+            If ``True``, return the relative frequency of mismatches instead of
+            the count.
 
         Returns
         -------
-        bool
-            Indicates whether `char` is in the `BiologicalSequence` attribute
-            `gap_alphabet`.
+        int or float
+            Number of positions that differ between the sequences. This will be
+            an ``int`` if `relative` is ``False`` and a ``float``
+            if `relative` is ``True``.
 
-        Notes
-        -----
-        This is a class method.
+        Raises
+        ------
+        ValueError
+            If the sequences are not the same length.
+        TypeError
+            If `other` is a ``Sequence`` object with a different type than the
+            biological sequence.
 
-        Examples
+        See Also
         --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> BiologicalSequence.is_gap('.')
-        True
-        >>> BiologicalSequence.is_gap('P')
-        False
-        >>> s = BiologicalSequence('ACACGACGTT')
-        >>> s.is_gap('-')
-        True
-
-        """
-        return char in cls.gap_alphabet()
-
-    def is_gapped(self):
-        """Return True if char(s) in `gap_alphabet` are present
-
-        Returns
-        -------
-        bool
-            Indicates whether there are one or more occurences of any character
-            in `self.gap_alphabet` in the `BiologicalSequence`.
+        match_frequency
+        matches
+        mismatches
+        distance
 
         Examples
         --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('ACACGACGTT')
-        >>> s.is_gapped()
-        False
-        >>> t = BiologicalSequence('A.CAC--GACGTT')
-        >>> t.is_gapped()
-        True
-
-        """
-        for e in self:
-            if self.is_gap(e):
-                return True
-        return False
-
-    def is_valid(self):
-        """Return True if the sequence is valid
-
-        Returns
-        -------
-        bool
-            ``True`` if `self` is valid, and ``False`` otherwise.
-
-        Notes
-        -----
-        Validity is defined as not containing any characters outside of
-        `self.alphabet` and `self.gap_alphabet`.
+        >>> from skbio import Sequence
+        >>> s = Sequence('GGUC')
+        >>> t = Sequence('AGUC')
+        >>> s.mismatch_frequency(t)
+        1
+        >>> s.mismatch_frequency(t, relative=True)
+        0.25
 
         """
-        return not self.has_unsupported_characters()
+        if relative:
+            return float(self.mismatches(other).mean())
+        else:
+            return int(self.mismatches(other).sum())
 
-    def k_words(self, k, overlapping=True):
-        """Get the list of words of length k
+    @stable(as_of="0.4.0")
+    def iter_kmers(self, k, overlap=True):
+        """Generate kmers of length `k` from the biological sequence.
 
         Parameters
         ----------
         k : int
-            The word length.
-        overlapping : bool, optional
-            Defines whether the k-words should be overlapping or not
-            overlapping.
+            The kmer length.
+        overlap : bool, optional
+            Defines whether the kmers should be overlapping or not.
 
-        Returns
-        -------
-        iterator of BiologicalSequences
-            Iterator of words of length `k` contained in the
-            BiologicalSequence.
+        Yields
+        ------
+        Sequence
+            kmer of length `k` contained in the biological sequence.
 
         Raises
         ------
         ValueError
-            If k < 1.
+            If `k` is less than 1.
 
         Examples
         --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('ACACGACGTT')
-        >>> [str(kw) for kw in s.k_words(4, overlapping=False)]
-        ['ACAC', 'GACG']
-        >>> [str(kw) for kw in s.k_words(3, overlapping=True)]
-        ['ACA', 'CAC', 'ACG', 'CGA', 'GAC', 'ACG', 'CGT', 'GTT']
+        >>> from skbio import Sequence
+        >>> s = Sequence('ACACGACGTT')
+        >>> for kmer in s.iter_kmers(4, overlap=False):
+        ...     str(kmer)
+        'ACAC'
+        'GACG'
+        >>> for kmer in s.iter_kmers(3, overlap=True):
+        ...     str(kmer)
+        'ACA'
+        'CAC'
+        'ACG'
+        'CGA'
+        'GAC'
+        'ACG'
+        'CGT'
+        'GTT'
 
         """
         if k < 1:
             raise ValueError("k must be greater than 0.")
 
-        sequence_length = len(self)
-
-        if overlapping:
+        if overlap:
             step = 1
+            count = len(self) - k + 1
         else:
             step = k
+            count = len(self) // k
 
-        for i in range(0, sequence_length - k + 1, step):
-            yield self[i:i+k]
-
-    def k_word_counts(self, k, overlapping=True):
-        """Get the counts of words of length k
-
-        Parameters
-        ----------
-        k : int
-            The word length.
-        overlapping : bool, optional
-            Defines whether the k-words should be overlapping or not
-            overlapping.
-
-        Returns
-        -------
-        collections.Counter
-            The counts of words of length `k` contained in the
-            BiologicalSequence.
-
-        Examples
-        --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('ACACAT')
-        >>> s.k_word_counts(3, overlapping=True)
-        Counter({'ACA': 2, 'CAC': 1, 'CAT': 1})
-
-        """
-        k_words = self.k_words(k, overlapping)
-        return Counter((str(seq) for seq in k_words))
+        if self.has_positional_metadata():
+            for i in range(0, len(self) - k + 1, step):
+                yield self[i:i+k]
+        # Optimized path when no positional metadata
+        else:
+            kmers = np.lib.stride_tricks.as_strided(
+                self._bytes, shape=(k, count), strides=(1, step)).T
+            for s in kmers:
+                yield self._to(sequence=s)
 
-    def k_word_frequencies(self, k, overlapping=True):
-        """Get the frequencies of words of length `k`
+    @stable(as_of="0.4.0")
+    def kmer_frequencies(self, k, overlap=True, relative=False):
+        """Return counts of words of length `k` from the biological sequence.
 
         Parameters
         ----------
         k : int
             The word length.
-        overlapping : bool, optional
-            Defines whether the k-words should be overlapping or not
-            overlapping. This is only relevant when `k` > 1.
-
-        Returns
-        -------
-        collections.defaultdict
-            The frequencies of words of length `k` contained in the
-            ``BiologicalSequence``.
-
-        Examples
-        --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('ACACAT')
-        >>> s.k_word_frequencies(3, overlapping=True)
-        defaultdict(<type 'float'>, {'CAC': 0.25, 'ACA': 0.5, 'CAT': 0.25})
-
-        """
-        if overlapping:
-            num_words = len(self) - k + 1
-        else:
-            num_words = len(self) // k
-
-        result = defaultdict(float)
-        k_word_counts = self.k_word_counts(k, overlapping=overlapping)
-        for word, count in viewitems(k_word_counts):
-            result[str(word)] = count / num_words
-        return result
-
-    def lower(self):
-        """Convert the BiologicalSequence to lowercase
-
-        Returns
-        -------
-        BiologicalSequence
-            The `BiologicalSequence` with all characters converted to
-            lowercase.
-
-        """
-        return self.copy(sequence=self.sequence.lower())
-
-    def nondegenerates(self):
-        """Yield all nondegenerate versions of the sequence.
+        overlap : bool, optional
+            Defines whether the kmers should be overlapping or not.
+        relative : bool, optional
+            If ``True``, return the relative frequency of each kmer instead of
+            its count.
 
         Returns
         -------
-        generator
-            Generator yielding all possible nondegenerate versions of the
-            sequence. Each sequence will have the same type, id, description,
-            and quality scores as `self`.
+        collections.Counter or collections.defaultdict
+            Frequencies of words of length `k` contained in the biological
+            sequence. This will be a ``collections.Counter`` if `relative` is
+            ``False`` and a ``collections.defaultdict`` if `relative` is
+            ``True``.
 
         Raises
         ------
-        BiologicalSequenceError
-            If the sequence contains an invalid character (a character that
-            isn't an IUPAC character or a gap character).
-
-        See Also
-        --------
-        iupac_degeneracies
-
-        Notes
-        -----
-        There is no guaranteed ordering to the generated sequences.
-
-        Examples
-        --------
-        >>> from skbio.sequence import NucleotideSequence
-        >>> seq = NucleotideSequence('TRG')
-        >>> seq_generator = seq.nondegenerates()
-        >>> for s in sorted(seq_generator, key=str): print(s)
-        TAG
-        TGG
-
-        """
-        degen_chars = self.iupac_degeneracies()
-        nonexpansion_chars = self.iupac_standard_characters().union(
-            self.gap_alphabet())
-
-        expansions = []
-        for char in self:
-            if char in nonexpansion_chars:
-                expansions.append(char)
-            else:
-                # Use a try/except instead of explicitly checking for set
-                # membership on the assumption that an exception is rarely
-                # thrown.
-                try:
-                    expansions.append(degen_chars[char])
-                except KeyError:
-                    raise BiologicalSequenceError(
-                        "Sequence contains an invalid character: %s" % char)
-
-        result = product(*expansions)
-        return (self.copy(sequence=nondegen_seq) for nondegen_seq in result)
-
-    def to_fasta(self, field_delimiter=" ", terminal_character="\n"):
-        """Return the sequence as a fasta-formatted string
-
-        .. note:: Deprecated in scikit-bio 0.2.0-dev
-           ``to_fasta`` will be removed in scikit-bio 0.3.0. It is replaced by
-           ``write``, which is a more general method for serializing
-           FASTA-formatted files. ``write`` supports multiple file formats by
-           taking advantage of scikit-bio's I/O registry system. See
-           :mod:`skbio.io` for more details.
-
-        Parameters
-        ----------
-        field_delimiter : str, optional
-            The character(s) to use on the header line between the
-            `self.id` and `self.description`.
-
-        terminal_character : str, optional
-            The last character to be included in the result (if you don't want
-            a trailing newline or other character in the result, you can pass
-            ``terminal_character=""``).
-
-        Returns
-        -------
-        str
-            The `BiologicalSequence` as a fasta-formatted string.
+        ValueError
+            If `k` is less than 1.
 
         Examples
         --------
-        >>> from skbio.sequence import BiologicalSequence
-        >>> s = BiologicalSequence('ACACGACGTT')
-        >>> print(s.to_fasta(terminal_character=""))
-        >
-        ACACGACGTT
-        >>> t = BiologicalSequence('ACA',id='my-seq',description='h')
-        >>> print(t.to_fasta(terminal_character=""))
-        >my-seq h
-        ACA
-
-        """
-        warnings.warn(
-            "BiologicalSequence.to_fasta is deprecated and will be removed in "
-            "scikit-bio 0.3.0. Please update your code to use "
-            "BiologicalSequence.write.", DeprecationWarning)
-
-        if self._description:
-            header_line = '%s%s%s' % (self._id, field_delimiter,
-                                      self._description)
-        else:
-            header_line = self._id
-
-        return '>%s\n%s%s' % (
-            header_line, self.sequence, terminal_character)
-
-    def upper(self):
-        """Convert the BiologicalSequence to uppercase
-
-        Returns
-        -------
-        BiologicalSequence
-            The `BiologicalSequence` with all characters converted to
-            uppercase.
+        >>> from skbio import Sequence
+        >>> s = Sequence('ACACATTTATTA')
+        >>> s.kmer_frequencies(3, overlap=False)
+        Counter({'TTA': 2, 'ACA': 1, 'CAT': 1})
+        >>> s.kmer_frequencies(3, relative=True, overlap=False)
+        defaultdict(<type 'float'>, {'ACA': 0.25, 'TTA': 0.5, 'CAT': 0.25})
 
         """
-        return self.copy(sequence=self.sequence.upper())
-
-    def _set_quality(self, quality):
-        if quality is not None:
-            quality = np.asarray(quality)
+        kmers = self.iter_kmers(k, overlap=overlap)
+        freqs = collections.Counter((str(seq) for seq in kmers))
 
-            if quality.ndim == 0:
-                # We have something scalar-like, so create a single-element
-                # vector to store it.
-                quality = np.reshape(quality, 1)
-
-            if quality.shape == (0,):
-                # cannot safe cast an empty vector from float to int
-                cast_type = 'unsafe'
+        if relative:
+            if overlap:
+                num_kmers = len(self) - k + 1
             else:
-                cast_type = 'safe'
-
-            quality = quality.astype(int, casting=cast_type, copy=False)
-            quality.flags.writeable = False
+                num_kmers = len(self) // k
 
-            if quality.ndim != 1:
-                raise BiologicalSequenceError(
-                    "Phred quality scores must be 1-D.")
-            if len(quality) != len(self):
-                raise BiologicalSequenceError(
-                    "Number of Phred quality scores (%d) must match the "
-                    "number of characters in the biological sequence (%d)." %
-                    (len(quality), len(self._sequence)))
-            if (quality < 0).any():
-                raise BiologicalSequenceError(
-                    "Phred quality scores must be greater than or equal to "
-                    "zero.")
+            relative_freqs = collections.defaultdict(float)
+            for kmer, count in viewitems(freqs):
+                relative_freqs[kmer] = count / num_kmers
+            freqs = relative_freqs
 
-        self._quality = quality
+        return freqs
 
-    def regex_iter(self, regex, retrieve_group_0=False):
-        """Find patterns specified by regular expression
+    @stable(as_of="0.4.0")
+    def find_with_regex(self, regex, ignore=None):
+        """Generate slices for patterns matched by a regular expression.
 
         Parameters
         ----------
-        regex : SRE_Pattern
-            A compiled regular expression (e.g., from re.compile) with
-            finditer method
-        retrieve_group_0 : bool, optional
-            Defaults to ``False``. If ``True``, group(0) will be included in
-            each list of tuples, which represents the shortest possible
-            substring of the full sequence that contains all the other groups
-
-        Returns
-        -------
-        generator
-            yields lists of 3-tuples. Each 3-tuple represents a group from the
-            matched regular expression, and contains the start of the hit, the
-            end of the hit, and the substring that was hit
-        """
-        start = 0 if retrieve_group_0 else 1
-
-        for match in regex.finditer(self._sequence):
-            for g in range(start, len(match.groups())+1):
-                yield (match.start(g), match.end(g), match.group(g))
-
-
-class NucleotideSequence(BiologicalSequence):
-    """Base class for nucleotide sequences.
-
-    A `NucleotideSequence` is a `BiologicalSequence` with additional methods
-    that are only applicable for nucleotide sequences, and containing only
-    characters used in the IUPAC DNA or RNA lexicon.
-
-    See Also
-    --------
-    BiologicalSequence
-
-    Notes
-    -----
-    All uppercase and lowercase IUPAC DNA/RNA characters are supported.
-
-    """
-
-    @classmethod
-    def complement_map(cls):
-        """Return the mapping of characters to their complements.
-
-        Returns
-        -------
-        dict
-            Mapping of characters to their complements.
-
-        Notes
-        -----
-        Complements cannot be defined for a generic `NucleotideSequence`
-        because the complement of 'A' is ambiguous.
-        `NucleotideSequence.complement_map` will therefore be the empty dict.
-        Thanks, nature...
-
-        """
-        return {}
-
-    @classmethod
-    def iupac_standard_characters(cls):
-        """Return the non-degenerate IUPAC nucleotide characters.
-
-        Returns
-        -------
-        set
-            Non-degenerate IUPAC nucleotide characters.
-
-        """
-        return set("ACGTUacgtu")
-
-    @classmethod
-    def iupac_degeneracies(cls):
-        """Return the mapping of degenerate to non-degenerate characters.
+        regex : str or regular expression object
+            String to be compiled into a regular expression, or a pre-
+            compiled regular expression object (e.g., from calling
+            ``re.compile``).
+        ignore : 1D array_like (bool) or iterable (slices or ints), optional
+            Indicate the positions to ignore when matching.
+
+        Yields
+        ------
+        slice
+            Location where the regular expression matched.
 
-        Returns
-        -------
-        dict of sets
-            Mapping of IUPAC degenerate nucleotide character to the set of
-            non-degenerate IUPAC nucleotide characters it represents.
+        Examples
+        --------
+        >>> from skbio import Sequence
+        >>> s = Sequence('AATATACCGGTTATAA')
+        >>> for match in s.find_with_regex('(TATA+)'):
+        ...     match
+        ...     str(s[match])
+        slice(2, 6, None)
+        'TATA'
+        slice(11, 16, None)
+        'TATAA'
 
         """
-        degen_map = {
-            "R": set("AG"), "Y": set("CTU"), "M": set("AC"), "K": set("TUG"),
-            "W": set("ATU"), "S": set("GC"), "B": set("CGTU"),
-            "D": set("AGTU"), "H": set("ACTU"), "V": set("ACG"),
-            "N": set("ACGTU")
-        }
+        if isinstance(regex, six.string_types):
+            regex = re.compile(regex)
 
-        for degen_char in list(degen_map.keys()):
-            nondegen_chars = degen_map[degen_char]
-            degen_map[degen_char.lower()] = set(
-                ''.join(nondegen_chars).lower())
+        lookup = np.arange(len(self))
+        if ignore is None:
+            string = str(self)
+        else:
+            ignore = self._munge_to_index_array(ignore)
+            lookup = np.delete(lookup, ignore)
+            string = str(self[lookup])
 
-        return degen_map
+        for match in regex.finditer(string):
+            # We start at 1 because we don't want the group that contains all
+            # other groups.
+            for g in range(1, len(match.groups())+1):
+                yield slice(lookup[match.start(g)],
+                            lookup[match.end(g) - 1] + 1)
 
-    def _complement(self, reverse=False):
-        """Returns `NucleotideSequence` that is (reverse) complement of `self`.
+    @stable(as_of="0.4.0")
+    def iter_contiguous(self, included, min_length=1, invert=False):
+        """Yield contiguous subsequences based on `included`.
 
         Parameters
         ----------
-        reverse : bool, optional
-            If ``True``, reverse `self` before complementing.
-
-        Returns
-        -------
-        NucelotideSequence
-            The (reverse) complement of `self`. Specific type will be the same
-            as ``type(self)``.
+        included : 1D array_like (bool) or iterable (slices or ints)
+            `included` is transformed into a flat boolean vector where each
+            position will either be included or skipped. All contiguous
+            included positions will be yielded as a single region.
+        min_length : int, optional
+            The minimum length of a subsequence for it to be yielded.
+            Default is 1.
+        invert : bool, optional
+            Whether to invert `included` such that it describes what should be
+            skipped instead of included. Default is False.
 
-        Raises
+        Yields
         ------
-        skbio.sequence.BiologicalSequenceError
-            If a character is present in the `NucleotideSequence` that is not
-            in the complement map.
+        Sequence
+            Contiguous subsequence as indicated by `included`.
 
         Notes
         -----
-        This private method centralizes the logic for `complement` and
-        `reverse_complement`.
-
-        """
-        result = []
-        complement_map = self.complement_map()
-        seq_iterator = reversed(self) if reverse else self
-        for base in seq_iterator:
-            try:
-                result.append(complement_map[base])
-            except KeyError:
-                raise BiologicalSequenceError(
-                    "Don't know how to complement base %s. Is it in "
-                    "%s.complement_map?" % (base, self.__class__.__name__))
-
-        quality = self.quality
-        if self.has_quality() and reverse:
-            quality = self.quality[::-1]
-
-        return self.copy(sequence=result, quality=quality)
-
-    def complement(self):
-        """Return the complement of the `NucleotideSequence`
-
-        Returns
-        -------
-        NucelotideSequence
-            The complement of `self`. Specific type will be the same as
-            ``type(self)``.
-
-        Raises
-        ------
-        skbio.sequence.BiologicalSequenceError
-            If a character is present in the `NucleotideSequence` that is not
-            in `self.complement_map`.
+        If slices provide adjacent ranges, then they will be considered the
+        same contiguous subsequence.
 
-        See Also
+        Examples
         --------
-        reverse_complement
-        complement_map
-
-        Notes
-        -----
-        The type, id, description, and quality scores of the result will be the
-        same as `self`.
+        Here we use `iter_contiguous` to find all of the contiguous ungapped
+        sequences using a boolean vector derived from our DNA sequence.
+
+        >>> from skbio import DNA
+        >>> s = DNA('AAA--TT-CCCC-G-')
+        >>> no_gaps = ~s.gaps()
+        >>> for ungapped_subsequence in s.iter_contiguous(no_gaps,
+        ...                                               min_length=2):
+        ...     print(ungapped_subsequence)
+        AAA
+        TT
+        CCCC
+
+        Note how the last potential subsequence was skipped because it would
+        have been smaller than our `min_length` which was set to 2.
+
+        We can also use `iter_contiguous` on a generator of slices as is
+        produced by `find_motifs` (and `find_with_regex`).
+
+        >>> from skbio import Protein
+        >>> s = Protein('ACDFNASANFTACGNPNRTESL')
+        >>> for subseq in s.iter_contiguous(s.find_motifs('N-glycosylation')):
+        ...     print(subseq)
+        NASANFTA
+        NRTE
+
+        Note how the first subsequence contains two N-glycosylation sites. This
+        happened because they were contiguous.
 
         """
-        return self._complement()
+        idx = self._munge_to_index_array(included)
+        if invert:
+            idx = np.delete(np.arange(len(self)), idx)
 
-    def is_reverse_complement(self, other):
-        """Return True if `other` is the reverse complement of `self`
+        # Adapted from http://stackoverflow.com/a/7353335/579416
+        for contig in np.split(idx, np.where(np.diff(idx) != 1)[0] + 1):
+            r = self[contig]
+            if len(r) >= min_length:
+                yield r
 
-        Returns
-        -------
-        bool
-            `True` if `other` is the reverse complement of `self` and `False`
-            otherwise.
-
-        Raises
-        ------
-        skbio.sequence.BiologicalSequenceError
-            If a character is present in `other` that is not in the
-            `self.complement_map`.
+    def _to(self, sequence=None, metadata=None, positional_metadata=None):
+        """Return a copy of the current biological sequence.
 
-        See Also
-        --------
-        reverse_complement
+        Returns a copy of the current biological sequence, optionally with
+        updated attributes specified as keyword arguments.
 
-        """
-        return self == other.reverse_complement()
+        Arguments are the same as those passed to the ``Sequence`` constructor.
+        The returned copy will have its attributes updated based on the
+        arguments. If an attribute is missing, the copy will keep the same
+        attribute as the current biological sequence. Valid attribute names
+        are `'sequence'`, `'metadata'`, and `'positional_metadata'`. Default
+        behavior is to return a copy of the current biological sequence
+        without changing any attributes.
 
-    def reverse_complement(self):
-        """Return the reverse complement of the `NucleotideSequence`
+        Parameters
+        ----------
+        sequence : optional
+        metadata : optional
+        positional_metadata : optional
 
         Returns
         -------
-        NucelotideSequence
-            The reverse complement of `self`. Specific type will be the same as
-            ``type(self)``.
-
-        Raises
-        ------
-        skbio.sequence.BiologicalSequenceError
-            If a character is present in the `NucleotideSequence` that is not
-            in `self.complement_map`.
-
-        See Also
-        --------
-        complement
-        complement_map
-        is_reverse_complement
+        Sequence
+            Copy of the current biological sequence, optionally with updated
+            attributes based on arguments. Will be the same type as the current
+            biological sequence (`self`).
 
         Notes
         -----
-        The type, id, and description of the result will be the same as `self`.
-        If quality scores are present, they will be reversed and included in
-        the resulting biological sequence.
+        By default, `metadata` and `positional_metadata` are shallow-copied and
+        the reference to `sequence` is used (without copying) for efficiency
+        since `sequence` is immutable. This differs from the behavior of
+        `Sequence.copy`, which will actually copy `sequence`.
 
-        """
-        return self._complement(reverse=True)
-    rc = reverse_complement
+        This method is the preferred way of creating new instances from an
+        existing biological sequence, instead of calling
+        ``self.__class__(...)``, as the latter can be error-prone (e.g.,
+        it's easy to forget to propagate attributes to the new instance).
 
-    def find_features(self, feature_type, min_length=1, allow_gaps=False):
-        """Search the sequence for features
+        """
+        if sequence is None:
+            sequence = self._bytes
+        if metadata is None:
+            metadata = self._metadata
+        if positional_metadata is None:
+            positional_metadata = self._positional_metadata
+        return self._constructor(sequence=sequence, metadata=metadata,
+                                 positional_metadata=positional_metadata)
 
-        Parameters
-        ----------
-        feature_type : {'purine_run', 'pyrimidine_run'}
-            The type of feature to find
-        min_length : int, optional
-            Defaults to 1. Only features at least as long as this will be
-            returned
-        allow_gaps : bool, optional
-            Defaults to ``False``. If ``True``, then gaps will not be
-            considered to disrupt a feature
+    def _constructor(self, **kwargs):
+        return self.__class__(**kwargs)
 
-        Returns
-        -------
-        generator
-            Yields tuples of the start of the feature, the end of the feature,
-            and the subsequence that composes the feature
-
-        Examples
-        --------
-        >>> from skbio.sequence import NucleotideSequence
-        >>> s = NucleotideSequence('G-AT.T')
-        >>> list(s.find_features('purine_run'))
-        [(0, 1, 'G'), (2, 3, 'A')]
-        >>> list(s.find_features('purine_run', 2))
-        []
-        >>> list(s.find_features('purine_run', 2, allow_gaps=True))
-        [(0, 3, 'G-A')]
-        >>> list(s.find_features('pyrimidine_run', 2, allow_gaps=True))
-        [(3, 6, 'T.T')]
+    def _munge_to_index_array(self, sliceable):
+        """Return an index array from something isomorphic to a boolean vector.
 
         """
-        gaps = re.escape(''.join(self.gap_alphabet()))
-        acceptable = gaps if allow_gaps else ''
-
-        if feature_type == 'purine_run':
-            pat_str = '([AGag%s]{%d,})' % (acceptable, min_length)
-        elif feature_type == 'pyrimidine_run':
-            pat_str = '([CTUctu%s]{%d,})' % (acceptable, min_length)
+        if isinstance(sliceable, six.string_types):
+            if sliceable in self.positional_metadata:
+                if self.positional_metadata[sliceable].dtype == np.bool:
+                    sliceable = self.positional_metadata[sliceable]
+                else:
+                    raise TypeError("Column '%s' in positional metadata does "
+                                    "not correspond to a boolean vector" %
+                                    sliceable)
+            else:
+                raise ValueError("No positional metadata associated with key "
+                                 "'%s'" % sliceable)
+
+        if not hasattr(sliceable, 'dtype') or (hasattr(sliceable, 'dtype') and
+                                               sliceable.dtype == 'object'):
+            sliceable = tuple(sliceable)
+            bool_mode = False
+            int_mode = False
+            for s in sliceable:
+                if isinstance(s, (bool, np.bool_)):
+                    bool_mode = True
+                elif isinstance(s, (slice, int, np.signedinteger)) or (
+                        hasattr(s, 'dtype') and s.dtype != np.bool):
+                    int_mode = True
+                else:
+                    raise TypeError("Invalid type in iterable: %s, must be one"
+                                    " of {bool, int, slice, np.signedinteger}"
+                                    % s.__class__.__name__)
+            if bool_mode and int_mode:
+                raise TypeError("Cannot provide iterable of both bool and"
+                                " int.")
+            sliceable = np.r_[sliceable]
+
+        if sliceable.dtype == np.bool:
+            if sliceable.size != len(self):
+                raise ValueError("Boolean array (%d) does not match length of"
+                                 " sequence (%d)."
+                                 % (sliceable.size, len(self)))
+            normalized, = np.where(sliceable)
         else:
-            raise ValueError("Unknown feature type: %s" % feature_type)
-
-        pat = re.compile(pat_str)
-
-        for hits in self.regex_iter(pat):
-            if allow_gaps:
-                degapped = hits[2]
-                for gap_char in self.gap_alphabet():
-                    degapped = degapped.replace(gap_char, '')
-                if len(degapped) >= min_length:
-                    yield hits
+            normalized = np.bincount(sliceable)
+            if np.any(normalized > 1):
+                raise ValueError("Overlapping index regions are not allowed.")
+
+            normalized, = np.where(normalized)
+            if np.any(normalized != sliceable):
+                raise ValueError("Index regions are out of order.")
+
+        return normalized
+
+    def _munge_to_sequence(self, other, method):
+        if isinstance(other, Sequence):
+            if type(other) != type(self):
+                raise TypeError("Cannot use %s and %s together with `%s`" %
+                                (self.__class__.__name__,
+                                 other.__class__.__name__, method))
             else:
-                yield hits
-
-
-class DNASequence(NucleotideSequence):
-    """Base class for DNA sequences.
-
-    A `DNASequence` is a `NucelotideSequence` that is restricted to only
-    containing characters used in IUPAC DNA lexicon.
-
-    See Also
-    --------
-    NucleotideSequence
-    BiologicalSequence
-
-    Notes
-    -----
-    All uppercase and lowercase IUPAC DNA characters are supported.
-
-    """
-
-    @classmethod
-    def complement_map(cls):
-        """Return the mapping of characters to their complements.
-
-        The complement of a gap character is itself.
-
-        Returns
-        -------
-        dict
-            Mapping of characters to their complements.
-
-        """
-        comp_map = {
-            'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y',
-            'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H',
-            'H': 'D', 'V': 'B', 'N': 'N', 'a': 't', 't': 'a', 'g': 'c',
-            'c': 'g', 'y': 'r', 'r': 'y', 's': 's', 'w': 'w', 'k': 'm',
-            'm': 'k', 'b': 'v', 'd': 'h', 'h': 'd', 'v': 'b', 'n': 'n'
-        }
-
-        comp_map.update({c: c for c in cls.gap_alphabet()})
-        return comp_map
+                return other
+
+        # We don't use self.__class__ or self._constructor here because we want
+        # to construct the most general type of Sequence object in order to
+        # avoid validation errors.
+        return Sequence(other)
+
+    def _munge_to_bytestring(self, other, method):
+        if type(other) is bytes:
+            return other
+        elif isinstance(other, six.string_types):
+            return other.encode('ascii')
+        else:
+            return self._munge_to_sequence(other, method)._string
 
-    @classmethod
-    def iupac_standard_characters(cls):
-        """Return the non-degenerate IUPAC DNA characters.
+    @contextmanager
+    def _byte_ownership(self):
+        if not self._owns_bytes:
+            self._bytes = self._bytes.copy()
+            self._owns_bytes = True
 
-        Returns
-        -------
-        set
-            Non-degenerate IUPAC DNA characters.
+        self._bytes.flags.writeable = True
+        yield
+        self._bytes.flags.writeable = False
 
-        """
-        return set("ACGTacgt")
 
-    @classmethod
-    def iupac_degeneracies(cls):
-        """Return the mapping of degenerate to non-degenerate characters.
+def _single_index_to_slice(start_index):
+    end_index = None if start_index == -1 else start_index+1
+    return slice(start_index, end_index)
 
-        Returns
-        -------
-        dict of sets
-            Mapping of IUPAC degenerate DNA character to the set of
-            non-degenerate IUPAC DNA characters it represents.
 
-        """
-        degen_map = {
-            "R": set("AG"), "Y": set("CT"), "M": set("AC"), "K": set("TG"),
-            "W": set("AT"), "S": set("GC"), "B": set("CGT"), "D": set("AGT"),
-            "H": set("ACT"), "V": set("ACG"), "N": set("ACGT")
-        }
+def _is_single_index(index):
+    return (isinstance(index, numbers.Integral) and
+            not isinstance(index, bool))
 
-        for degen_char in list(degen_map.keys()):
-            nondegen_chars = degen_map[degen_char]
-            degen_map[degen_char.lower()] = set(
-                ''.join(nondegen_chars).lower())
 
-        return degen_map
+def _as_slice_if_single_index(indexable):
+    if _is_single_index(indexable):
+        return _single_index_to_slice(indexable)
+    else:
+        return indexable
 
 
-# class is accessible with alternative name for convenience
-DNA = DNASequence
+def _slices_from_iter(array, indexables):
+    for i in indexables:
+        if isinstance(i, slice):
+            pass
+        elif _is_single_index(i):
+            i = _single_index_to_slice(i)
+        else:
+            raise IndexError("Cannot slice sequence from iterable "
+                             "containing %r." % i)
 
+        yield array[i]
 
-class RNASequence(NucleotideSequence):
-    """Base class for RNA sequences.
 
-    An `RNASequence` is a `NucelotideSequence` that is restricted to only
-    containing characters used in the IUPAC RNA lexicon.
+class _SequenceReprBuilder(object):
+    """Build a ``Sequence`` repr.
 
-    Notes
-    -----
-    All uppercase and lowercase IUPAC RNA characters are supported.
+    Parameters
+    ----------
+    seq : Sequence
+        Sequence to repr.
+    width : int
+        Maximum width of the repr.
+    indent : int
+        Number of spaces to use for indented lines.
+    chunk_size : int
+        Number of characters in each chunk of a sequence.
 
     """
+    def __init__(self, seq, width, indent, chunk_size):
+        self._seq = seq
+        self._width = width
+        self._indent = ' ' * indent
+        self._chunk_size = chunk_size
+
+    def build(self):
+        lines = ElasticLines()
+
+        cls_name = self._seq.__class__.__name__
+        lines.add_line(cls_name)
+        lines.add_separator()
+
+        if self._seq.has_metadata():
+            lines.add_line('Metadata:')
+            # Python 3 doesn't allow sorting of mixed types so we can't just
+            # use sorted() on the metadata keys. Sort first by type then sort
+            # by value within each type.
+            for key in self._sorted_keys_grouped_by_type(self._seq.metadata):
+                value = self._seq.metadata[key]
+                lines.add_lines(self._format_metadata_key_value(key, value))
+
+        if self._seq.has_positional_metadata():
+            lines.add_line('Positional metadata:')
+            for key in self._seq.positional_metadata.columns.values.tolist():
+                dtype = self._seq.positional_metadata[key].dtype
+                lines.add_lines(
+                    self._format_positional_metadata_column(key, dtype))
+
+        lines.add_line('Stats:')
+        for label, value in self._seq._repr_stats():
+            lines.add_line('%s%s: %s' % (self._indent, label, value))
+        lines.add_separator()
+
+        num_lines, num_chars, column_width = self._find_optimal_seq_chunking()
+
+        # display entire sequence if we can, else display the first two and
+        # last two lines separated by ellipsis
+        if num_lines <= 5:
+            lines.add_lines(self._format_chunked_seq(
+                range(num_lines), num_chars, column_width))
+        else:
+            lines.add_lines(self._format_chunked_seq(
+                range(2), num_chars, column_width))
+            lines.add_line('...')
+            lines.add_lines(self._format_chunked_seq(
+                range(num_lines - 2, num_lines), num_chars, column_width))
+
+        return lines.to_str()
+
+    def _sorted_keys_grouped_by_type(self, dict_):
+        """Group keys within a dict by their type and sort within type."""
+        type_sorted = sorted(dict_, key=self._type_sort_key)
+        type_and_value_sorted = []
+        for _, group in itertools.groupby(type_sorted, self._type_sort_key):
+            type_and_value_sorted.extend(sorted(group))
+        return type_and_value_sorted
+
+    def _type_sort_key(self, key):
+        return repr(type(key))
+
+    def _format_metadata_key_value(self, key, value):
+        """Format metadata key:value, wrapping across lines if necessary."""
+        key_fmt = self._format_key(key)
+
+        supported_type = True
+        if isinstance(value, (six.text_type, six.binary_type)):
+            # for stringy values, there may be u'' or b'' depending on the type
+            # of `value` and version of Python. find the starting quote
+            # character so that wrapped text will line up with that instead of
+            # the string literal prefix character. for example:
+            #
+            #     'foo': u'abc def ghi
+            #              jkl mno'
+            value_repr = repr(value)
+            extra_indent = 1
+            if not (value_repr.startswith("'") or value_repr.startswith('"')):
+                extra_indent = 2
+        # handles any number, this includes bool
+        elif value is None or isinstance(value, numbers.Number):
+            value_repr = repr(value)
+            extra_indent = 0
+        else:
+            supported_type = False
 
-    @classmethod
-    def complement_map(cls):
-        """Return the mapping of characters to their complements.
-
-        The complement of a gap character is itself.
-
-        Returns
-        -------
-        dict
-            Mapping of characters to their complements.
+        if not supported_type or len(value_repr) > 140:
+            value_repr = str(type(value))
+            # extra indent of 1 so that wrapped text lines up past the bracket:
+            #
+            #     'foo': <type
+            #             'dict'>
+            extra_indent = 1
 
-        """
-        comp_map = {
-            'A': 'U', 'U': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y',
-            'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H',
-            'H': 'D', 'V': 'B', 'N': 'N', 'a': 'u', 'u': 'a', 'g': 'c',
-            'c': 'g', 'y': 'r', 'r': 'y', 's': 's', 'w': 'w', 'k': 'm',
-            'm': 'k', 'b': 'v', 'd': 'h', 'h': 'd', 'v': 'b', 'n': 'n'
-        }
+        return self._wrap_text_with_indent(value_repr, key_fmt, extra_indent)
 
-        comp_map.update({c: c for c in cls.gap_alphabet()})
-        return comp_map
+    def _format_key(self, key):
+        """Format metadata key.
 
-    @classmethod
-    def iupac_standard_characters(cls):
-        """Return the non-degenerate IUPAC RNA characters.
+        Includes initial indent and trailing colon and space:
 
-        Returns
-        -------
-        set
-            Non-degenerate IUPAC RNA characters.
+            <indent>'foo':<space>
 
         """
-        return set("ACGUacgu")
+        key_fmt = self._indent + repr(key)
+        supported_types = (six.text_type, six.binary_type, numbers.Number,
+                           type(None))
+        if len(key_fmt) > (self._width / 2) or not isinstance(key,
+                                                              supported_types):
+            key_fmt = self._indent + str(type(key))
+        return '%s: ' % key_fmt
 
-    @classmethod
-    def iupac_degeneracies(cls):
-        """Return the mapping of degenerate to non-degenerate characters.
+    def _wrap_text_with_indent(self, text, initial_text, extra_indent):
+        """Wrap text across lines with an initial indentation.
 
-        Returns
-        -------
-        dict of sets
-            Mapping of IUPAC degenerate RNA character to the set of
-            non-degenerate IUPAC RNA characters it represents.
+        For example:
 
-        """
-        degen_map = {
-            "R": set("AG"), "Y": set("CU"), "M": set("AC"), "K": set("UG"),
-            "W": set("AU"), "S": set("GC"), "B": set("CGU"), "D": set("AGU"),
-            "H": set("ACU"), "V": set("ACG"), "N": set("ACGU")
-        }
-
-        for degen_char in list(degen_map.keys()):
-            nondegen_chars = degen_map[degen_char]
-            degen_map[degen_char.lower()] = set(
-                ''.join(nondegen_chars).lower())
-
-        return degen_map
-
-# class is accessible with alternative name for convenience
-RNA = RNASequence
-
-
-class ProteinSequence(BiologicalSequence):
-    """Base class for protein sequences.
+            'foo': 'abc def
+                    ghi jkl
+                    mno pqr'
 
-    A `ProteinSequence` is a `BiologicalSequence` containing only characters
-    used in the IUPAC protein lexicon.
-
-    See Also
-    --------
-    BiologicalSequence
-
-    Notes
-    -----
-    All uppercase and lowercase IUPAC protein characters are supported.
-
-    """
-
-    @classmethod
-    def iupac_standard_characters(cls):
-        """Return the non-degenerate IUPAC protein characters.
-
-        Returns
-        -------
-        set
-            Non-degenerate IUPAC protein characters.
+        <indent>'foo':<space> is `initial_text`. `extra_indent` is 1. Wrapped
+        lines are indented such that they line up with the start of the
+        previous line of wrapped text.
 
         """
-        return set("ACDEFGHIKLMNPQRSTVWYacdefghiklmnpqrstvwy")
+        return textwrap.wrap(
+            text, width=self._width, expand_tabs=False,
+            initial_indent=initial_text,
+            subsequent_indent=' ' * (len(initial_text) + extra_indent))
 
-    @classmethod
-    def iupac_degeneracies(cls):
-        """Return the mapping of degenerate to non-degenerate characters.
+    def _format_positional_metadata_column(self, key, dtype):
+        key_fmt = self._format_key(key)
+        dtype_fmt = '<dtype: %s>' % str(dtype)
+        return self._wrap_text_with_indent(dtype_fmt, key_fmt, 1)
 
-        Returns
-        -------
-        dict of sets
-            Mapping of IUPAC degenerate protein character to the set of
-            non-degenerate IUPAC protein characters it represents.
+    def _find_optimal_seq_chunking(self):
+        """Find the optimal number of sequence chunks to fit on a single line.
 
-        """
-        degen_map = {
-            "B": set("DN"), "Z": set("EQ"),
-            "X": set("ACDEFGHIKLMNPQRSTVWY")
-        }
-
-        degen_map_lower = {}
-        for degen_char in degen_map:
-            nondegen_chars = degen_map[degen_char]
-            degen_map_lower[degen_char.lower()] = set(
-                ''.join(nondegen_chars).lower())
-
-        degen_map.update(degen_map_lower)
+        Returns the number of lines the sequence will occupy, the number of
+        sequence characters displayed on each line, and the column width
+        necessary to display position info using the optimal number of sequence
+        chunks.
 
-        return degen_map
-
-# class is accessible with alternative name for convenience
-Protein = ProteinSequence
+        """
+        # strategy: use an iterative approach to find the optimal number of
+        # sequence chunks per line. start with a single chunk and increase
+        # until the max line width is exceeded. when this happens, the previous
+        # number of chunks is optimal
+        num_lines = 0
+        num_chars = 0
+        column_width = 0
+
+        num_chunks = 1
+        not_exceeded = True
+        while not_exceeded:
+            line_len, new_chunk_info = self._compute_chunked_seq_line_len(
+                num_chunks)
+            not_exceeded = line_len <= self._width
+            if not_exceeded:
+                num_lines, num_chars, column_width = new_chunk_info
+                num_chunks += 1
+        return num_lines, num_chars, column_width
+
+    def _compute_chunked_seq_line_len(self, num_chunks):
+        """Compute line length based on a number of chunks."""
+        num_chars = num_chunks * self._chunk_size
+
+        # ceil to account for partial line
+        num_lines = int(math.ceil(len(self._seq) / num_chars))
+
+        # position column width is fixed width, based on the number of
+        # characters necessary to display the position of the final line (all
+        # previous positions will be left justified using this width)
+        column_width = len('%d ' % ((num_lines - 1) * num_chars))
+
+        # column width + number of sequence characters + spaces between chunks
+        line_len = column_width + num_chars + (num_chunks - 1)
+        return line_len, (num_lines, num_chars, column_width)
+
+    def _format_chunked_seq(self, line_idxs, num_chars, column_width):
+        """Format specified lines of chunked sequence data."""
+        lines = []
+        for line_idx in line_idxs:
+            seq_idx = line_idx * num_chars
+            chars = str(self._seq[seq_idx:seq_idx+num_chars])
+            chunked_chars = chunk_str(chars, self._chunk_size, ' ')
+            lines.append(('%d' % seq_idx).ljust(column_width) + chunked_chars)
+        return lines
diff --git a/skbio/sequence/tests/__init__.py b/skbio/sequence/tests/__init__.py
index c99682c..3fe3dc6 100644
--- a/skbio/sequence/tests/__init__.py
+++ b/skbio/sequence/tests/__init__.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -7,3 +5,5 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
diff --git a/skbio/sequence/tests/test_base.py b/skbio/sequence/tests/test_base.py
new file mode 100644
index 0000000..f071903
--- /dev/null
+++ b/skbio/sequence/tests/test_base.py
@@ -0,0 +1,48 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import unittest
+
+from skbio.sequence._base import ElasticLines
+
+
+class TestElasticLines(unittest.TestCase):
+    def setUp(self):
+        # Fresh formatter per test; every test method uses this instance.
+        self.el = ElasticLines()
+
+    def test_empty(self):
+        self.assertEqual(self.el.to_str(), '')
+
+    def test_add_line(self):
+        self.el.add_line('foo')
+        self.assertEqual(self.el.to_str(), 'foo')
+
+    def test_add_lines(self):
+        self.el.add_lines(['alice', 'bob', 'carol'])
+        self.assertEqual(self.el.to_str(), 'alice\nbob\ncarol')
+
+    def test_add_separator(self):
+        self.el.add_separator()
+        self.assertEqual(self.el.to_str(), '')
+
+        self.el.add_line('foo')
+        self.assertEqual(self.el.to_str(), '---\nfoo')
+
+        self.el.add_separator()
+        self.el.add_lines(['bar', 'bazzzz'])
+        self.el.add_separator()
+
+        self.assertEqual(self.el.to_str(),
+                         '------\nfoo\n------\nbar\nbazzzz\n------')
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/skbio/sequence/tests/test_dna.py b/skbio/sequence/tests/test_dna.py
new file mode 100644
index 0000000..4c3c3d3
--- /dev/null
+++ b/skbio/sequence/tests/test_dna.py
@@ -0,0 +1,45 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import unittest
+
+from skbio import DNA, RNA
+
+
+# tests specific to DNA go here. tests for functionality shared by DNA and RNA
+# go in test_nucleotide_sequences.py
+class TestDNA(unittest.TestCase):
+    def test_transcribe(self):
+        # without changes
+        self.assertEqual(DNA('').transcribe(), RNA(''))
+        self.assertEqual(DNA('A').transcribe(), RNA('A'))
+        self.assertEqual(DNA('.ACGW-').transcribe(), RNA('.ACGW-'))
+
+        # with changes
+        self.assertEqual(DNA('T').transcribe(), RNA('U'))
+        self.assertEqual(DNA('TT').transcribe(), RNA('UU'))
+        self.assertEqual(DNA('ATCTG').transcribe(), RNA('AUCUG'))
+        self.assertEqual(DNA('TTTG').transcribe(), RNA('UUUG'))
+
+    def test_transcribe_preserves_all_metadata(self):
+        exp = RNA('AGUU', metadata={'foo': 'bar'},
+                  positional_metadata={'foo': range(4)})
+        seq = DNA('AGTT', metadata={'foo': 'bar'},
+                  positional_metadata={'foo': range(4)})
+        self.assertEqual(seq.transcribe(), exp)
+
+    def test_transcribe_does_not_modify_input(self):
+        seq = DNA('ATAT')
+        self.assertEqual(seq.transcribe(), RNA('AUAU'))
+        self.assertEqual(seq, DNA('ATAT'))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/skbio/sequence/tests/test_genetic_code.py b/skbio/sequence/tests/test_genetic_code.py
index 829caed..4af0a45 100644
--- a/skbio/sequence/tests/test_genetic_code.py
+++ b/skbio/sequence/tests/test_genetic_code.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -8,370 +6,504 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from unittest import TestCase, main
+from __future__ import absolute_import, division, print_function
 
-from skbio import DNA, RNA, Protein
-from skbio.sequence import (GeneticCode, genetic_code,
-                            GeneticCodeInitError, InvalidCodonError)
+import itertools
+import unittest
 
+import six
+import numpy as np
+import numpy.testing as npt
 
-class GeneticCodeTests(TestCase):
+from skbio import Sequence, DNA, RNA, Protein, GeneticCode
+from skbio.sequence._genetic_code import _ncbi_genetic_codes
 
-    """Tests of the GeneticCode class."""
 
+class TestGeneticCode(unittest.TestCase):
     def setUp(self):
-        """Set up some standard genetic code representations."""
-        self.sgc = ("FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAAD"
-                    "DEEGGGG")
-        self.mt = ("FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSS**VVVVAAAADD"
-                   "EEGGGG")
-        self.allg = ("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"
-                     "GGGGGGGG")
-
-        self.wrong_length = [
-            "GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"
-            "",
-            "GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"
-            "G",
-        ]
-        self.ncbi_standard = [
-            'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG',
-            1,
-            'Standard Nuclear',
-            '---M---------------M---------------M----------------------------',
-        ]
+        self.sgc = GeneticCode.from_ncbi(1)
+
+    def test_from_ncbi_valid_table_ids(self):
+        # spot check a few tables
+        self.assertEqual(GeneticCode.from_ncbi().name,
+                         'Standard')
+        self.assertEqual(GeneticCode.from_ncbi(2).name,
+                         'Vertebrate Mitochondrial')
+        self.assertEqual(GeneticCode.from_ncbi(12).name,
+                         'Alternative Yeast Nuclear')
+        self.assertEqual(GeneticCode.from_ncbi(25).name,
+                         'Candidate Division SR1 and Gracilibacteria')
+
+    def test_from_ncbi_invalid_input(self):
+        with six.assertRaisesRegex(self, ValueError, 'table_id.*7'):
+            GeneticCode.from_ncbi(7)
+        with six.assertRaisesRegex(self, ValueError, 'table_id.*42'):
+            GeneticCode.from_ncbi(42)
+
+    def test_reading_frames(self):
+        exp = [1, 2, 3, -1, -2, -3]
+        self.assertEqual(GeneticCode.reading_frames, exp)
+        self.assertEqual(self.sgc.reading_frames, exp)
+
+        GeneticCode.reading_frames.append(42)
+
+        self.assertEqual(GeneticCode.reading_frames, exp)
+        self.assertEqual(self.sgc.reading_frames, exp)
+
+        with self.assertRaises(AttributeError):
+            self.sgc.reading_frames = [1, 2, 42]
+
+    def test_name(self):
+        self.assertEqual(self.sgc.name, 'Standard')
+        self.assertEqual(GeneticCode('M' * 64, '-' * 64).name, '')
+        self.assertEqual(GeneticCode('M' * 64, '-' * 64, 'foo').name, 'foo')
+
+        with self.assertRaises(AttributeError):
+            self.sgc.name = 'foo'
+
+    def test_init_varied_equivalent_input(self):
+        for args in (('M' * 64, '-' * 64),
+                     (Protein('M' * 64), Protein('-' * 64)),
+                     (Sequence('M' * 64), Sequence('-' * 64))):
+            gc = GeneticCode(*args)
+            self.assertEqual(gc.name, '')
+            self.assertEqual(gc._amino_acids, Protein('M' * 64))
+            self.assertEqual(gc._starts, Protein('-' * 64))
+            npt.assert_array_equal(gc._m_character_codon,
+                                   np.asarray([0, 0, 0], dtype=np.uint8))
+            self.assertEqual(len(gc._start_codons), 0)
+
+    def test_init_invalid_input(self):
+        # `amino_acids` invalid protein
+        with six.assertRaisesRegex(self, ValueError, 'Invalid character.*J'):
+            GeneticCode('J' * 64, '-' * 64)
+
+        # wrong number of amino acids
+        with six.assertRaisesRegex(self, ValueError, 'amino_acids.*64.*42'):
+            GeneticCode('M' * 42, '-' * 64)
+
+        # `amino_acids` missing M
+        with six.assertRaisesRegex(self, ValueError,
+                                   'amino_acids.*M.*character'):
+            GeneticCode('A' * 64, '-' * 64)
+
+        # `starts` invalid protein
+        with six.assertRaisesRegex(self, ValueError, 'Invalid character.*J'):
+            GeneticCode('M' * 64, 'J' * 64)
+
+        # wrong number of starts
+        with six.assertRaisesRegex(self, ValueError, 'starts.*64.*42'):
+            GeneticCode('M' * 64, '-' * 42)
+
+        # invalid characters in `starts`
+        with six.assertRaisesRegex(self, ValueError,
+                                   'starts.*M and - characters'):
+            GeneticCode('M' * 64, '-M' * 30 + '*AQR')
 
-    def test_init(self):
-        """GeneticCode init should work with correct-length sequences"""
-        sgc = GeneticCode(self.sgc)
-        self.assertEqual(sgc['UUU'], 'F')
-        mt = GeneticCode(self.mt)
-        self.assertEqual(mt['UUU'], 'F')
-        allg = GeneticCode(self.allg)
-        self.assertEqual(allg['UUU'], 'G')
-        for i in self.wrong_length:
-            self.assertRaises(GeneticCodeInitError, GeneticCode, i)
+    def test_str(self):
+        # predefined
+        exp = (
+            '  AAs  = FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAA'
+            'DDEEGGGG\n'
+            'Starts = ---M---------------M---------------M--------------------'
+            '--------\n'
+            'Base1  = UUUUUUUUUUUUUUUUCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGG'
+            'GGGGGGGG\n'
+            'Base2  = UUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCC'
+            'AAAAGGGG\n'
+            'Base3  = UCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAG'
+            'UCAGUCAG'
+        )
+        self.assertEqual(str(self.sgc), exp)
+
+        # custom, no name
+        obs = str(GeneticCode('M' * 64, '-' * 64))
+        self.assertIn('M' * 64, obs)
+        self.assertIn('-' * 64, obs)
+
+    def test_repr(self):
+        # predefined
+        exp = (
+            'GeneticCode (Standard)\n'
+            '-----------------------------------------------------------------'
+            '--------\n'
+            '  AAs  = FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAA'
+            'DDEEGGGG\n'
+            'Starts = ---M---------------M---------------M--------------------'
+            '--------\n'
+            'Base1  = UUUUUUUUUUUUUUUUCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGG'
+            'GGGGGGGG\n'
+            'Base2  = UUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCCAAAAGGGGUUUUCCCC'
+            'AAAAGGGG\n'
+            'Base3  = UCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAGUCAG'
+            'UCAGUCAG'
+        )
+        self.assertEqual(repr(self.sgc), exp)
+
+        # custom, no name
+        obs = repr(GeneticCode('M' * 64, '-' * 64))
+        self.assertTrue(obs.startswith('GeneticCode\n'))
+        self.assertIn('M' * 64, obs)
+        self.assertIn('-' * 64, obs)
 
     def test_eq(self):
-        gc_1 = GeneticCode(self.sgc)
-        gc_2 = GeneticCode(self.sgc)
-        self.assertEqual(gc_1, gc_2)
+        amino_acids = 'AMPM' * 16
+        starts = '--M-' * 16
+
+        equal_gcs = [
+            GeneticCode(amino_acids, starts),
+            # name should be ignored
+            GeneticCode(amino_acids, starts, 'foo'),
+            # metadata/positional metadata should be ignored if Sequence
+            # subclass is provided
+            GeneticCode(
+                Protein(amino_acids, metadata={'foo': 'bar'}),
+                Protein(starts, positional_metadata={'foo': range(64)}))
+        ]
 
-    def test_eq_type_mismatch(self):
-        self.assertFalse(GeneticCode(self.sgc) == 'i cracked the code!')
+        # every gc should be equal to itself
+        for gc in equal_gcs:
+            self.assertTrue(gc == gc)
+            self.assertFalse(gc != gc)
 
-    def test_ne(self):
-        gc_1 = GeneticCode(self.sgc)
-        gc_2 = GeneticCode(self.sgc)
-        # Explicitly using !=
-        self.assertFalse(gc_1 != gc_2)
-
-    def test_standard_code(self):
-        """Standard genetic code from NCBI should have correct properties"""
-        sgc = GeneticCode(*self.ncbi_standard)
-        self.assertEqual(sgc.code_sequence, 'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRI'
-                         'IIMTTTTNNKKSSRRVVVVAAAADDEEGGGG')
-        self.assertEqual(sgc.start_codon_sequence, '---M---------------M------'
-                         '---------M----------------------------')
-        self.assertEqual(sgc.start_codons, {'TTG': 'M', 'CTG': 'M',
-                                            'ATG': 'M'})
-        self.assertEqual(sgc.id, 1)
-        self.assertEqual(sgc.name, 'Standard Nuclear')
-        self.assertEqual(sgc['UUU'], 'F')
-        self.assertEqual(sgc.is_start('ATG'), True)
-        self.assertEqual(sgc.is_start('AAA'), False)
-        self.assertEqual(sgc.is_stop('UAA'), True)
-        self.assertEqual(sgc.is_stop('AAA'), False)
-        self.assertEqual(len(sgc.sense_codons), 61)
-        self.assertTrue('AAA' in sgc.sense_codons)
-        self.assertFalse('TGA' in sgc.sense_codons)
-
-    def test_standard_code_lookup(self):
-        """genetic_code should hold codes keyed by id as string and number"""
-        sgc_new = GeneticCode(*self.ncbi_standard)
-        sgc_number = genetic_code(1)
-        sgc_string = genetic_code('1')
-        sgc_empty = genetic_code()
-        for sgc in sgc_new, sgc_number, sgc_string, sgc_empty:
-            self.assertEqual(sgc.code_sequence, 'FFLLSSSSYY**CC*WLLLLPPPPHHQQR'
-                             'RRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG')
-            self.assertEqual(sgc.start_codon_sequence, '---M---------------M--'
-                             '-------------M----------------------------')
-            self.assertEqual(
-                sgc.start_codons, {'TTG': 'M', 'CTG': 'M', 'ATG': 'M'})
-            self.assertEqual(sgc.id, 1)
-            self.assertEqual(sgc.name, 'Standard Nuclear')
-            self.assertEqual(sgc['TTT'], 'F')
-            self.assertEqual(sgc.is_start('ATG'), True)
-            self.assertEqual(sgc.is_start('AAA'), False)
-            self.assertEqual(sgc.is_stop('TAA'), True)
-            self.assertEqual(sgc.is_stop('AAA'), False)
-
-        mtgc = genetic_code(2)
-        self.assertEqual(mtgc.name, 'Vertebrate Mitochondrial')
-        self.assertEqual(mtgc.is_start('AUU'), True)
-        self.assertEqual(mtgc.is_stop('UGA'), False)
-
-        self.assertEqual(sgc_new.changes(mtgc), {'AGA': 'R*', 'AGG': 'R*',
-                                                 'ATA': 'IM', 'TGA': '*W'})
-        self.assertEqual(mtgc.changes(sgc_new), {'AGA': '*R', 'AGG': '*R',
-                                                 'ATA': 'MI', 'TGA': 'W*'})
-        self.assertEqual(mtgc.changes(mtgc), {})
-        self.assertEqual(mtgc.changes('FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTT'
-                         'TNNKKSSRRVVVVAAAADDEEGGGG'), {'AGA': '*R',
-                         'AGG': '*R', 'ATA': 'MI', 'TGA': 'W*'})
+        # every pair of gcs should be equal. use permutations instead of
+        # combinations to test that comparing gc1 to gc2 and gc2 to gc1 are
+        # both equal
+        for gc1, gc2 in itertools.permutations(equal_gcs, 2):
+            self.assertTrue(gc1 == gc2)
+            self.assertFalse(gc1 != gc2)
 
-    def test_str(self):
-        """GeneticCode str() should return its code string"""
-        code_strings = self.sgc, self.mt, self.allg
-        codes = map(GeneticCode, code_strings)
-        for code, string in zip(codes, code_strings):
-            self.assertEqual(str(code), string)
-        # check an example directly in case strings are bad
-        self.assertEqual(str(self.sgc), "FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMT"
-                         "TTTNNKKSSRRVVVVAAAADDEEGGGG")
-
-    def test_cmp(self):
-        """GeneticCode cmp() should act on code strings"""
-        sgc_1 = GeneticCode(self.sgc)
-        sgc_2 = GeneticCode(self.sgc)
-        self.assertEqual(sgc_1 is sgc_2, False)  # ensure different objects
-        # self.assertNotEqual(sgc_1, sgc_2) # GREG
-        self.assertEqual(sgc_1, sgc_2)
-        mtgc = GeneticCode(self.mt)
-        self.assertNotEqual(sgc_1, mtgc)
-
-    def test_getitem_codon(self):
-        """GeneticCode getitem should return amino acid for codon"""
-        # specific checks of a particular codon in the standard code
-        variant_codons = ['AUU', 'AUU', 'AUU', 'ATT', 'ATU', 'ATU']
-        sgc = GeneticCode(self.sgc)
-        for i in variant_codons:
-            self.assertEqual(sgc[i], 'I')
-        # full check for the standard code
-        codons = [a + b + c for a in 'UCAG' for b in 'TCAG' for c in 'UCAG']
-        for codon, aa in zip(codons, self.sgc):
-            self.assertEqual(sgc[codon], aa)
-        # full check for another code
-        allg = GeneticCode(self.allg)
-        for codon, aa in zip(codons, self.allg):
-            self.assertEqual(allg[codon], aa)
-        # check that degenerate codon returns X
-        self.assertEqual(sgc['NNN'], 'X')
-
-    def test_getitem_aa(self):
-        """GeneticCode getitem should return codon set for aa"""
-        # for all G, should return all the codons (in some order)
-        allg = GeneticCode(self.allg)
-        codons = [a + b + c for a in 'TCAG' for b in 'TCAG' for c in 'TCAG']
-        g_codons = allg['G']
-        codons_copy = codons[:]
-        self.assertEqual(g_codons, codons_copy)
-
-        # check some known cases in the standard genetic code
-        sgc = GeneticCode(self.sgc)
-        exp_ile = ['ATT', 'ATC', 'ATA']
-        obs_ile = sgc['I']
-        self.assertEqual(obs_ile, exp_ile)
-
-        exp_arg = ['AGA', 'AGG', 'CGT', 'CGC', 'CGA', 'CGG']
-        obs_arg = sgc['R']
-        if hasattr(self, 'assertItemsEqual'):
-            self.assertItemsEqual(obs_arg, exp_arg)
-        else:
-            self.assertCountEqual(obs_arg, exp_arg)
-
-        exp_leu = ['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG']
-        obs_leu = sgc['L']
-        self.assertEqual(obs_leu, exp_leu)
-
-        exp_met = ['ATG']
-        obs_met = sgc['M']
-        self.assertEqual(obs_met, exp_met)
-
-        # unknown aa should return []
-        self.assertEqual(sgc['U'], [])
-
-    def test_getitem_invalid_length(self):
-        """GeneticCode getitem raises InvalidCodonError on wrong length"""
-        sgc = GeneticCode(self.sgc)
-        self.assertRaises(InvalidCodonError, sgc.__getitem__, 'AAAA')
-        self.assertRaises(InvalidCodonError, sgc.__getitem__, 'AA')
-
-    def test_blocks(self):
-        """GeneticCode blocks should return correct list"""
-        sgc = GeneticCode(self.sgc)
-        exp_blocks = [
-            ['TTT', 'TTC', ],
-            ['TTA', 'TTG', ],
-            ['TCT', 'TCC', 'TCA', 'TCG'],
-            ['TAT', 'TAC'],
-            ['TAA', 'TAG'],
-            ['TGT', 'TGC'],
-            ['TGA'],
-            ['TGG'],
-            ['CTT', 'CTC', 'CTA', 'CTG'],
-            ['CCT', 'CCC', 'CCA', 'CCG'],
-            ['CAT', 'CAC'],
-            ['CAA', 'CAG'],
-            ['CGT', 'CGC', 'CGA', 'CGG'],
-            ['ATT', 'ATC'],
-            ['ATA', ],
-            ['ATG', ],
-            ['ACT', 'ACC', 'ACA', 'ACG'],
-            ['AAT', 'AAC'],
-            ['AAA', 'AAG'],
-            ['AGT', 'AGC'],
-            ['AGA', 'AGG'],
-            ['GTT', 'GTC', 'GTA', 'GTG'],
-            ['GCT', 'GCC', 'GCA', 'GCG'],
-            ['GAT', 'GAC'],
-            ['GAA', 'GAG'],
-            ['GGT', 'GGC', 'GGA', 'GGG'],
+    def test_ne(self):
+        class GeneticCodeSubclass(GeneticCode):
+            pass
+
+        amino_acids = 'AMPM' * 16
+        starts = '--M-' * 16
+
+        unequal_gcs = [
+            GeneticCode(amino_acids, starts),
+            # type must match
+            GeneticCodeSubclass(amino_acids, starts),
+            # completely different type
+            'foo'
         ]
-        self.assertEqual(sgc.blocks, exp_blocks)
-
-    def test_anticodons(self):
-        """GeneticCode anticodons should return correct list"""
-        sgc = GeneticCode(self.sgc)
-        exp_anticodons = {
-            'F': ['AAA', 'GAA', ],
-            'L': ['TAA', 'CAA', 'AAG', 'GAG', 'TAG', 'CAG'],
-            'Y': ['ATA', 'GTA'],
-            '*': ['TTA', 'CTA', 'TCA'],
-            'C': ['ACA', 'GCA'],
-            'W': ['CCA'],
-            'S': ['AGA', 'GGA', 'TGA', 'CGA', 'ACT', 'GCT'],
-            'P': ['AGG', 'GGG', 'TGG', 'CGG'],
-            'H': ['ATG', 'GTG'],
-            'Q': ['TTG', 'CTG'],
-            'R': ['ACG', 'GCG', 'TCG', 'CCG', 'TCT', 'CCT'],
-            'I': ['AAT', 'GAT', 'TAT'],
-            'M': ['CAT', ],
-            'T': ['AGT', 'GGT', 'TGT', 'CGT'],
-            'N': ['ATT', 'GTT'],
-            'K': ['TTT', 'CTT'],
-            'V': ['AAC', 'GAC', 'TAC', 'CAC'],
-            'A': ['AGC', 'GGC', 'TGC', 'CGC'],
-            'D': ['ATC', 'GTC'],
-            'E': ['TTC', 'CTC'],
-            'G': ['ACC', 'GCC', 'TCC', 'CCC'],
-        }
-        self.assertEqual(sgc.anticodons, exp_anticodons)
-
-    def test_translate(self):
-        """GeneticCode translate should return correct amino acid string"""
-        allg = GeneticCode(self.allg)
-        sgc = GeneticCode(self.sgc)
-        mt = GeneticCode(self.mt)
-
-        seq = 'AUGCAUGACUUUUGA'
-        #      .  .  .  .  .        markers for codon start
-        self.assertEqual(allg.translate(seq), Protein('GGGGG'))
-        self.assertEqual(allg.translate(seq, 1), Protein('GGGG'))
-        self.assertEqual(allg.translate(seq, 2), Protein('GGGG'))
-        self.assertEqual(allg.translate(seq, 3), Protein('GGGG'))
-        self.assertEqual(allg.translate(seq, 4), Protein('GGG'))
-        self.assertEqual(allg.translate(seq, 12), Protein('G'))
-        self.assertEqual(allg.translate(seq, 14), Protein(''))
-        self.assertRaises(ValueError, allg.translate, seq, 15)
-        self.assertRaises(ValueError, allg.translate, seq, 20)
-
-        self.assertEqual(sgc.translate(seq), Protein('MHDF*'))
-        self.assertEqual(sgc.translate(seq, 3), Protein('HDF*'))
-        self.assertEqual(sgc.translate(seq, 6), Protein('DF*'))
-        self.assertEqual(sgc.translate(seq, 9), Protein('F*'))
-        self.assertEqual(sgc.translate(seq, 12), Protein('*'))
-        self.assertEqual(sgc.translate(seq, 14), Protein(''))
-        # check shortest translatable sequences
-        self.assertEqual(sgc.translate('AAA'), Protein('K'))
-        self.assertEqual(sgc.translate(''), Protein(''))
-
-        # check that different code gives different results
-        self.assertEqual(mt.translate(seq), Protein('MHDFW'))
-
-        # check translation with invalid codon(s)
-        self.assertEqual(sgc.translate('AAANNNCNC123UUU'), Protein('KXXXF'))
+        # none of the NCBI genetic codes should be equal to each other
+        unequal_gcs.extend(_ncbi_genetic_codes.values())
+
+        for gc in unequal_gcs:
+            self.assertTrue(gc == gc)
+            self.assertFalse(gc != gc)
+
+        for gc1, gc2 in itertools.permutations(unequal_gcs, 2):
+            self.assertTrue(gc1 != gc2)
+            self.assertFalse(gc1 == gc2)
+
+    def test_translate_preserves_metadata(self):
+        obs = self.sgc.translate(
+            RNA('AUG', metadata={'foo': 'bar', 'baz': 42},
+                positional_metadata={'foo': range(3)}))
+        # metadata retained, positional metadata dropped
+        self.assertEqual(obs, Protein('M',
+                                      metadata={'foo': 'bar', 'baz': 42}))
+
+    def test_translate_default_behavior(self):
+        # empty translation
+        exp = Protein('')
+        for seq in RNA(''), RNA('A'), RNA('AU'):
+            obs = self.sgc.translate(seq)
+            self.assertEqual(obs, exp)
+
+        # no start or stop codons
+        obs = self.sgc.translate(RNA('CCU'))
+        self.assertEqual(obs, Protein('P'))
+
+        # multiple alternative start codons, no stop codons, length is multiple
+        # of 3
+        obs = self.sgc.translate(RNA('CAUUUGCUGAAA'))
+        self.assertEqual(obs, Protein('HLLK'))
+
+        # multiple stop codons, length isn't multiple of 3
+        obs = self.sgc.translate(RNA('UUUUUUUAAAGUUAAGGGAU'))
+        self.assertEqual(obs, Protein('FF*S*G'))
+
+    def test_translate_reading_frame_empty_translation(self):
+        exp = Protein('')
+        for seq in RNA(''), RNA('A'), RNA('AU'):
+            for reading_frame in GeneticCode.reading_frames:
+                obs = self.sgc.translate(seq, reading_frame=reading_frame)
+                self.assertEqual(obs, exp)
+
+        # reading frames that yield a partial codon
+        for reading_frame in 2, 3, -2, -3:
+            obs = self.sgc.translate(RNA('AUG'), reading_frame=reading_frame)
+            self.assertEqual(obs, exp)
+
+    def test_translate_reading_frame_non_empty_translation(self):
+        seq = RNA('AUGGUGGAA')  # rc = UUCCACCAU
+        for reading_frame, exp_str in ((1, 'MVE'), (2, 'WW'), (3, 'GG'),
+                                       (-1, 'FHH'), (-2, 'ST'), (-3, 'PP')):
+            exp = Protein(exp_str)
+            obs = self.sgc.translate(seq, reading_frame=reading_frame)
+            self.assertEqual(obs, exp)
+
+    def test_translate_start_empty_translation(self):
+        exp = Protein('')
+        for seq in RNA(''), RNA('A'), RNA('AU'):
+            for start in {'optional', 'ignore'}:
+                obs = self.sgc.translate(seq, start=start)
+                self.assertEqual(obs, exp)
+
+            with six.assertRaisesRegex(self, ValueError,
+                                       'reading_frame=1.*start=\'require\''):
+                self.sgc.translate(seq, start='require')
+
+    def test_translate_start_with_start_codon(self):
+        # trim before start codon, replace with M. ensure alternative start
+        # codons following the start codon aren't replaced with M. ensure
+        # default behavior for handling stop codons is retained
+        seq = RNA('CAUUUGCUGAAAUGA')
+        exp = Protein('MLK*')
+        for start in {'require', 'optional'}:
+            obs = self.sgc.translate(seq, start=start)
+            self.assertEqual(obs, exp)
+
+        # ignore start codon replacement and trimming; just translate
+        exp = Protein('HLLK*')
+        obs = self.sgc.translate(seq, start='ignore')
+        self.assertEqual(obs, exp)
+
+        # just a start codon, no replacement necessary
+        seq = RNA('AUG')
+        exp = Protein('M')
+        for start in {'require', 'optional', 'ignore'}:
+            obs = self.sgc.translate(seq, start=start)
+            self.assertEqual(obs, exp)
+
+        # single alternative start codon
+        seq = RNA('CUG')
+        exp = Protein('M')
+        for start in {'require', 'optional'}:
+            obs = self.sgc.translate(seq, start=start)
+            self.assertEqual(obs, exp)
+
+        exp = Protein('L')
+        obs = self.sgc.translate(seq, start='ignore')
+        self.assertEqual(obs, exp)
+
+    def test_translate_start_no_start_codon(self):
+        seq = RNA('CAACAACAGCAA')
+        exp = Protein('QQQQ')
+        for start in {'ignore', 'optional'}:
+            obs = self.sgc.translate(seq, start=start)
+            self.assertEqual(obs, exp)
+
+        with six.assertRaisesRegex(self, ValueError,
+                                   'reading_frame=1.*start=\'require\''):
+            self.sgc.translate(seq, start='require')
+
+        # non-start codon that translates to an AA that start codons also map
+        # to. should catch bug if code attempts to search and trim *after*
+        # translation -- this must happen *before* translation
+        seq = RNA('UUACAA')
+        exp = Protein('LQ')
+        for start in {'ignore', 'optional'}:
+            obs = self.sgc.translate(seq, start=start)
+            self.assertEqual(obs, exp)
+
+        with six.assertRaisesRegex(self, ValueError,
+                                   'reading_frame=1.*start=\'require\''):
+            self.sgc.translate(seq, start='require')
+
+    def test_translate_start_no_accidental_mutation(self):
+        # `start` mutates a vector in-place that is derived from
+        # GeneticCode._offset_table. the current code doesn't perform an
+        # explicit copy because numpy's advanced indexing is used, which always
+        # returns a copy. test this assumption here in case that behavior
+        # changes in the future
+        offset_table = self.sgc._offset_table.copy()
+
+        seq = RNA('CAUUUGCUGAAAUGA')
+        obs = self.sgc.translate(seq, start='require')
+        self.assertEqual(obs, Protein('MLK*'))
+
+        npt.assert_array_equal(self.sgc._offset_table, offset_table)
+
+    def test_translate_stop_empty_translation(self):
+        exp = Protein('')
+        for seq in RNA(''), RNA('A'), RNA('AU'):
+            for stop in {'optional', 'ignore'}:
+                obs = self.sgc.translate(seq, stop=stop)
+                self.assertEqual(obs, exp)
+
+            with six.assertRaisesRegex(self, ValueError,
+                                       'reading_frame=1.*stop=\'require\''):
+                self.sgc.translate(seq, stop='require')
+
+    def test_translate_stop_with_stop_codon(self):
+        # multiple stop codons with trailing codons
+        seq = RNA('UGGACUUGAUAUCGUUAGGAU')
+        exp = Protein('WT')
+        for stop in {'require', 'optional'}:
+            obs = self.sgc.translate(seq, stop=stop)
+            self.assertEqual(obs, exp)
+
+        # ignore stop codon trimming; just translate
+        exp = Protein('WT*YR*D')
+        obs = self.sgc.translate(seq, stop='ignore')
+        self.assertEqual(obs, exp)
+
+        # ends with single stop codon
+        seq = RNA('UGUCUGUAA')
+        exp = Protein('CL')
+        for stop in {'require', 'optional'}:
+            obs = self.sgc.translate(seq, stop=stop)
+            self.assertEqual(obs, exp)
+
+        exp = Protein('CL*')
+        obs = self.sgc.translate(seq, stop='ignore')
+        self.assertEqual(obs, exp)
+
+        # just a stop codon
+        seq = RNA('UAG')
+        exp = Protein('')
+        for stop in {'require', 'optional'}:
+            obs = self.sgc.translate(seq, stop=stop)
+            self.assertEqual(obs, exp)
+
+        exp = Protein('*')
+        obs = self.sgc.translate(seq, stop='ignore')
+        self.assertEqual(obs, exp)
+
+    def test_translate_stop_no_stop_codon(self):
+        seq = RNA('GAAUCU')
+        exp = Protein('ES')
+        for stop in {'ignore', 'optional'}:
+            obs = self.sgc.translate(seq, stop=stop)
+            self.assertEqual(obs, exp)
+
+        with six.assertRaisesRegex(self, ValueError,
+                                   'reading_frame=1.*stop=\'require\''):
+            self.sgc.translate(seq, stop='require')
+
+    def test_translate_trim_to_cds(self):
+        seq = RNA('UAAUUGCCUCAUUAAUAACAAUGA')
+
+        # find first start codon, trim all before it, convert alternative start
+        # codon to M, finally trim to first stop codon following the start
+        # codon
+        exp = Protein('MPH')
+        for param in {'require', 'optional'}:
+            obs = self.sgc.translate(seq, start=param, stop=param)
+            self.assertEqual(obs, exp)
+
+        exp = Protein('*LPH**Q*')
+        obs = self.sgc.translate(seq, start='ignore', stop='ignore')
+        self.assertEqual(obs, exp)
+
+        # alternative reading frame disrupts cds:
+        #     AAUUGCCUCAUUAAUAACAAUGA
+        #     NCLINNN
+        with six.assertRaisesRegex(self, ValueError,
+                                   'reading_frame=2.*start=\'require\''):
+            self.sgc.translate(seq, reading_frame=2, start='require')
+        with six.assertRaisesRegex(self, ValueError,
+                                   'reading_frame=2.*stop=\'require\''):
+            self.sgc.translate(seq, reading_frame=2, stop='require')
+
+        exp = Protein('NCLINNN')
+        for param in {'ignore', 'optional'}:
+            obs = self.sgc.translate(seq, reading_frame=2, start=param,
+                                     stop=param)
+            self.assertEqual(obs, exp)
+
+    def test_translate_invalid_input(self):
+        # invalid sequence type
+        with six.assertRaisesRegex(self, TypeError, 'RNA.*DNA'):
+            self.sgc.translate(DNA('ACG'))
+        with six.assertRaisesRegex(self, TypeError, 'RNA.*str'):
+            self.sgc.translate('ACG')
+
+        # invalid reading frame
+        with six.assertRaisesRegex(self, ValueError,
+                                   '\[1, 2, 3, -1, -2, -3\].*0'):
+            self.sgc.translate(RNA('AUG'), reading_frame=0)
+
+        # invalid start
+        with six.assertRaisesRegex(self, ValueError, 'start.*foo'):
+            self.sgc.translate(RNA('AUG'), start='foo')
+
+        # invalid stop
+        with six.assertRaisesRegex(self, ValueError, 'stop.*foo'):
+            self.sgc.translate(RNA('AUG'), stop='foo')
+
+        # gapped sequence
+        with six.assertRaisesRegex(self, ValueError, 'gapped'):
+            self.sgc.translate(RNA('UU-G'))
+
+        # degenerate sequence
+        with six.assertRaisesRegex(self, NotImplementedError, 'degenerate'):
+            self.sgc.translate(RNA('RUG'))
+
+    def test_translate_varied_genetic_codes(self):
+        # spot check using a few NCBI and custom genetic codes to translate
+        seq = RNA('AAUGAUGUGACUAUCAGAAGG')
+
+        # table_id=2
+        exp = Protein('NDVTI**')
+        obs = GeneticCode.from_ncbi(2).translate(seq)
+        self.assertEqual(obs, exp)
+
+        exp = Protein('MTI')
+        obs = GeneticCode.from_ncbi(2).translate(seq, start='require',
+                                                 stop='require')
+        self.assertEqual(obs, exp)
+
+        # table_id=22
+        exp = Protein('NDVTIRR')
+        obs = GeneticCode.from_ncbi(22).translate(seq)
+        self.assertEqual(obs, exp)
+
+        with six.assertRaisesRegex(self, ValueError,
+                                   'reading_frame=1.*start=\'require\''):
+            GeneticCode.from_ncbi(22).translate(seq, start='require',
+                                                stop='require')
+
+        # custom, no start codons
+        gc = GeneticCode('MWN*' * 16, '-' * 64)
+        exp = Protein('MM*MWN*')
+        obs = gc.translate(seq)
+        self.assertEqual(obs, exp)
+
+        with six.assertRaisesRegex(self, ValueError,
+                                   'reading_frame=1.*start=\'require\''):
+            gc.translate(seq, start='require', stop='require')
 
     def test_translate_six_frames(self):
-        """GeneticCode translate_six_frames provides six-frame translation"""
-
-        class fake_rna(str):
-
-            """Fake RNA class with reverse-complement"""
-            def __new__(cls, seq, rev):
-                return str.__new__(cls, seq)
-
-            def __init__(self, seq, rev):
-                self.seq = seq
-                self.rev = rev
-
-            def rc(self):
-                return self.rev
-
-        test_rna = fake_rna('AUGCUAACAUAAA', 'UUUAUGUUAGCAU')
-        #                    .  .  .  .  .    .  .  .  .  .
-        sgc = GeneticCode(self.sgc)
-        self.assertEqual(sgc.translate_six_frames(test_rna), [
-            Protein('MLT*'), Protein('C*HK'), Protein('ANI'), Protein('FMLA'),
-            Protein('LC*H'), Protein('YVS')])
-
-        # should also actually work with an RNA or DNA sequence!!!
-        test_rna = RNA('AUGCUAACAUAAA')
-        self.assertEqual(sgc.translate_six_frames(test_rna), [
-            Protein('MLT*'), Protein('C*HK'), Protein('ANI'), Protein('FMLA'),
-            Protein('LC*H'), Protein('YVS')])
-
-    def test_stop_indexes(self):
-        """should return stop codon indexes for a specified frame"""
-        sgc = GeneticCode(self.sgc)
-        seq = DNA('ATGCTAACATAAA')
-        expected = [[9], [4], []]
-        for frame, expect in enumerate(expected):
-            got = sgc.get_stop_indices(seq, start=frame)
-            self.assertEqual(got, expect)
-
-    def test_synonyms(self):
-        """GeneticCode synonyms should return aa -> codon set mapping."""
-        expected_synonyms = {
-            'A': ['GCT', 'GCC', 'GCA', 'GCG'],
-            'C': ['TGT', 'TGC'],
-            'D': ['GAT', 'GAC'],
-            'E': ['GAA', 'GAG'],
-            'F': ['TTT', 'TTC'],
-            'G': ['GGT', 'GGC', 'GGA', 'GGG'],
-            'H': ['CAT', 'CAC'],
-            'I': ['ATT', 'ATC', 'ATA'],
-            'K': ['AAA', 'AAG'],
-            'L': ['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],
-            'M': ['ATG'],
-            'N': ['AAT', 'AAC'],
-            'P': ['CCT', 'CCC', 'CCA', 'CCG'],
-            'Q': ['CAA', 'CAG'],
-            'R': ['AGA', 'AGG', 'CGT', 'CGC', 'CGA', 'CGG'],
-            'S': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],
-            'T': ['ACT', 'ACC', 'ACA', 'ACG'],
-            'V': ['GTT', 'GTC', 'GTA', 'GTG'],
-            'W': ['TGG'],
-            'Y': ['TAT', 'TAC'],
-            '*': ['TAA', 'TAG', 'TGA'],
-        }
-        obs_synonyms = GeneticCode(self.sgc).synonyms
-        # note that the lists will be arbitrary-order
-        for i in expected_synonyms:
-            if hasattr(self, 'assertItemsEqual'):
-                self.assertItemsEqual(obs_synonyms[i], expected_synonyms[i])
-            else:
-                self.assertCountEqual(obs_synonyms[i], expected_synonyms[i])
-
-    def test_genetic_code_with_too_many_args(self):
-        with self.assertRaises(TypeError):
-            genetic_code(1, 2)
-
-    def test_genetic_code_with_invalid_id(self):
-        with self.assertRaises(ValueError):
-            genetic_code(30)
+        seq = RNA('AUGCUAACAUAAA')  # rc = UUUAUGUUAGCAU
+
+        # test default behavior
+        exp = [Protein('MLT*'), Protein('C*HK'), Protein('ANI'),
+               Protein('FMLA'), Protein('LC*H'), Protein('YVS')]
+        obs = list(self.sgc.translate_six_frames(seq))
+        self.assertEqual(obs, exp)
+
+        # test that start/stop are respected
+        exp = [Protein('MLT'), Protein('C'), Protein('ANI'),
+               Protein('MLA'), Protein('LC'), Protein('YVS')]
+        obs = list(self.sgc.translate_six_frames(seq, start='optional',
+                                                 stop='optional'))
+        self.assertEqual(obs, exp)
+
+    def test_translate_six_frames_preserves_metadata(self):
+        seq = RNA('AUG', metadata={'foo': 'bar', 'baz': 42},
+                  positional_metadata={'foo': range(3)})
+        obs = list(self.sgc.translate_six_frames(seq))[:2]
+        # metadata retained, positional metadata dropped
+        self.assertEqual(
+            obs,
+            [Protein('M', metadata={'foo': 'bar', 'baz': 42}),
+             Protein('', metadata={'foo': 'bar', 'baz': 42})])
 
 
# Allow running this test module directly (delegates to unittest's runner).
if __name__ == '__main__':
    unittest.main()
diff --git a/skbio/sequence/tests/test_iupac_sequence.py b/skbio/sequence/tests/test_iupac_sequence.py
new file mode 100644
index 0000000..64fd0a2
--- /dev/null
+++ b/skbio/sequence/tests/test_iupac_sequence.py
@@ -0,0 +1,509 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+import six
+
+from unittest import TestCase, main
+
+import numpy as np
+import numpy.testing as npt
+
+from skbio.sequence._iupac_sequence import IUPACSequence
+from skbio.util._decorator import classproperty
+
+
class ExampleIUPACSequence(IUPACSequence):
    """Minimal concrete IUPACSequence over the toy alphabet {A, B, C}."""

    @classproperty
    def degenerate_map(cls):
        # Each degenerate code expands to exactly two concrete characters.
        return {"X": {"A", "B"}, "Y": {"B", "C"}, "Z": {"A", "C"}}

    @classproperty
    def nondegenerate_chars(cls):
        return {"A", "B", "C"}
+
+
class ExampleMotifsTester(ExampleIUPACSequence):
    """Adds fake "motifs" so the motif-finding code paths can be exercised."""

    @property
    def _motifs(self):
        # Not real motifs -- just two callables with the expected signature.
        motifs = {}
        motifs["name1"] = lambda x, _, __: str(x)
        motifs["name2"] = lambda x, _, __: len(x)
        return motifs
+
+
class TestIUPACSequence(TestCase):
    """Unit tests for IUPACSequence via the ExampleIUPACSequence subclass.

    Changes from the original: regex patterns containing bracket
    metacharacters are now raw strings (unrecognized escapes in non-raw
    literals are deprecated), and a local variable that shadowed the
    ``bytes`` builtin was renamed. All assertions are unchanged.
    """

    def setUp(self):
        self.lowercase_seq = ExampleIUPACSequence('AAAAaaaa', lowercase='key')

    def test_instantiation_with_no_implementation(self):
        class IUPACSequenceSubclassNoImplementation(IUPACSequence):
            pass

        with self.assertRaises(TypeError) as cm:
            IUPACSequenceSubclassNoImplementation()

        self.assertIn("abstract class", str(cm.exception))
        self.assertIn("nondegenerate_chars", str(cm.exception))
        self.assertIn("degenerate_map", str(cm.exception))

    def test_init_default_parameters(self):
        seq = ExampleIUPACSequence('.-ABCXYZ')

        npt.assert_equal(seq.values, np.array('.-ABCXYZ', dtype='c'))
        self.assertFalse(seq.has_metadata())
        self.assertFalse(seq.has_positional_metadata())

    def test_init_nondefault_parameters(self):
        seq = ExampleIUPACSequence('.-ABCXYZ',
                                   metadata={'id': 'foo'},
                                   positional_metadata={'quality': range(8)})

        npt.assert_equal(seq.values, np.array('.-ABCXYZ', dtype='c'))
        self.assertTrue(seq.has_metadata())
        self.assertEqual(seq.metadata['id'], 'foo')
        self.assertTrue(seq.has_positional_metadata())
        npt.assert_equal(seq.positional_metadata['quality'], np.array(range(8),
                         dtype='int'))

    def test_init_valid_empty_sequence(self):
        # just make sure we can instantiate an empty sequence regardless of
        # `validate` and `lowercase` parameters. more extensive tests
        # are performed in Sequence base class unit tests
        for validate in (True, False):
            for lowercase in (True, False):
                seq = ExampleIUPACSequence('', validate=validate,
                                           lowercase=lowercase)
                self.assertEqual(seq, ExampleIUPACSequence(''))

    def test_init_valid_single_character_sequence(self):
        for validate in (True, False):
            for lowercase in (True, False):
                seq = ExampleIUPACSequence('C', validate=validate,
                                           lowercase=lowercase)
                self.assertEqual(seq, ExampleIUPACSequence('C'))

    def test_init_valid_multiple_character_sequence(self):
        for validate in (True, False):
            for lowercase in (True, False):
                seq = ExampleIUPACSequence('BAACB.XYY-AZ', validate=validate,
                                           lowercase=lowercase)
                self.assertEqual(seq, ExampleIUPACSequence('BAACB.XYY-AZ'))

    def test_init_validate_parameter_single_character(self):
        seq = 'w'

        with six.assertRaisesRegex(self, ValueError, "character.*'w'"):
            ExampleIUPACSequence(seq)

        # test that we can instantiate an invalid sequence. we don't guarantee
        # anything working beyond instantiation
        ExampleIUPACSequence(seq, validate=False)

    def test_init_validate_parameter_multiple_characters(self):
        # mix of valid and invalid characters with repeats and lowercased
        # alphabet characters
        seq = 'CBCBBbawCbbwBXYZ-.x'

        with six.assertRaisesRegex(self, ValueError,
                                   r"\['a', 'b', 'w', 'x'\]"):
            ExampleIUPACSequence(seq)

        ExampleIUPACSequence(seq, validate=False)

    def test_init_lowercase_all_lowercase(self):
        s = 'cbcbbbazcbbzbxyz-.x'

        with six.assertRaisesRegex(self, ValueError,
                                   r"\['a', 'b', 'c', 'x', 'y', 'z'\]"):
            ExampleIUPACSequence(s)

        seq = ExampleIUPACSequence(s, lowercase=True)
        self.assertEqual(seq, ExampleIUPACSequence('CBCBBBAZCBBZBXYZ-.X'))

    def test_init_lowercase_mixed_case(self):
        s = 'CBCBBbazCbbzBXYZ-.x'

        with six.assertRaisesRegex(self, ValueError,
                                   r"\['a', 'b', 'x', 'z'\]"):
            ExampleIUPACSequence(s)

        seq = ExampleIUPACSequence(s, lowercase=True)
        self.assertEqual(seq, ExampleIUPACSequence('CBCBBBAZCBBZBXYZ-.X'))

    def test_init_lowercase_no_validation(self):
        s = 'car'

        with six.assertRaisesRegex(self, ValueError, r"\['a', 'c', 'r'\]"):
            ExampleIUPACSequence(s)

        with six.assertRaisesRegex(self, ValueError, "character.*'R'"):
            ExampleIUPACSequence(s, lowercase=True)

        ExampleIUPACSequence(s, lowercase=True, validate=False)

    def test_init_lowercase_byte_ownership(self):
        # renamed from `bytes` to avoid shadowing the builtin
        raw_bytes = np.array([97, 98, 97], dtype=np.uint8)

        with six.assertRaisesRegex(self, ValueError, r"\['a', 'b'\]"):
            ExampleIUPACSequence(raw_bytes)

        seq = ExampleIUPACSequence(raw_bytes, lowercase=True)
        self.assertEqual(seq, ExampleIUPACSequence('ABA'))

        # should not share the same memory
        self.assertIsNot(seq._bytes, raw_bytes)

        # we should have copied `raw_bytes` before modifying in place to
        # convert to upper. make sure `raw_bytes` hasn't been mutated
        npt.assert_equal(raw_bytes, np.array([97, 98, 97], dtype=np.uint8))

    def test_init_lowercase_invalid_keys(self):
        for invalid_key in ((), [], 2):
            invalid_type = type(invalid_key)
            with six.assertRaisesRegex(self, TypeError,
                                       "lowercase keyword argument expected "
                                       "a bool or string, but got %s" %
                                       invalid_type):
                ExampleIUPACSequence('ACGTacgt', lowercase=invalid_key)

    def test_lowercase_mungeable_key(self):
        # NOTE: This test relies on Sequence._munge_to_index_array working
        # properly. If the internal implementation of the lowercase method
        # changes to no longer use _munge_to_index_array, this test may need
        # to be updated to cover cases currently covered by
        # _munge_to_index_array
        self.assertEqual('AAAAaaaa', self.lowercase_seq.lowercase('key'))

    def test_lowercase_array_key(self):
        # NOTE: This test relies on Sequence._munge_to_index_array working
        # properly. If the internal implementation of the lowercase method
        # changes to no longer use _munge_to_index_array, this test may need
        # to be updated to cover cases currently covered by
        # _munge_to_index_array
        self.assertEqual('aaAAaaaa',
                         self.lowercase_seq.lowercase(
                             np.array([True, True, False, False, True, True,
                                       True, True])))
        self.assertEqual('AaAAaAAA',
                         self.lowercase_seq.lowercase([1, 4]))

    def test_degenerate_chars(self):
        expected = set("XYZ")
        self.assertIs(type(ExampleIUPACSequence.degenerate_chars), set)
        self.assertEqual(ExampleIUPACSequence.degenerate_chars, expected)

        # mutating the returned set must not affect the class-level value
        ExampleIUPACSequence.degenerate_chars.add("W")
        self.assertEqual(ExampleIUPACSequence.degenerate_chars, expected)

        self.assertEqual(ExampleIUPACSequence('').degenerate_chars, expected)

        with self.assertRaises(AttributeError):
            ExampleIUPACSequence('').degenerate_chars = set("BAR")

    def test_nondegenerate_chars(self):
        expected = set("ABC")
        self.assertEqual(ExampleIUPACSequence.nondegenerate_chars, expected)

        ExampleIUPACSequence.degenerate_chars.add("D")
        self.assertEqual(ExampleIUPACSequence.nondegenerate_chars, expected)

        self.assertEqual(ExampleIUPACSequence('').nondegenerate_chars,
                         expected)

        with self.assertRaises(AttributeError):
            ExampleIUPACSequence('').nondegenerate_chars = set("BAR")

    def test_gap_chars(self):
        expected = set(".-")
        self.assertIs(type(ExampleIUPACSequence.gap_chars), set)
        self.assertEqual(ExampleIUPACSequence.gap_chars, expected)

        ExampleIUPACSequence.gap_chars.add("_")
        self.assertEqual(ExampleIUPACSequence.gap_chars, expected)

        self.assertEqual(ExampleIUPACSequence('').gap_chars, expected)

        with self.assertRaises(AttributeError):
            ExampleIUPACSequence('').gap_chars = set("_ =")

    def test_alphabet(self):
        expected = set("ABC.-XYZ")
        self.assertIs(type(ExampleIUPACSequence.alphabet), set)
        self.assertEqual(ExampleIUPACSequence.alphabet, expected)

        ExampleIUPACSequence.alphabet.add("DEF")
        self.assertEqual(ExampleIUPACSequence.alphabet, expected)

        self.assertEqual(ExampleIUPACSequence('').alphabet, expected)

        with self.assertRaises(AttributeError):
            ExampleIUPACSequence('').alphabet = set("ABCDEFG.-WXYZ")

    def test_degenerate_map(self):
        expected = {"X": set("AB"), "Y": set("BC"), "Z": set("AC")}
        self.assertEqual(ExampleIUPACSequence.degenerate_map, expected)

        ExampleIUPACSequence.degenerate_map['W'] = set("ABC")
        ExampleIUPACSequence.degenerate_map['X'] = set("CA")
        self.assertEqual(ExampleIUPACSequence.degenerate_map, expected)

        self.assertEqual(ExampleIUPACSequence('').degenerate_map, expected)

        with self.assertRaises(AttributeError):
            ExampleIUPACSequence('').degenerate_map = {'W': "ABC"}

    def test_gaps(self):
        self.assertIs(type(ExampleIUPACSequence("").gaps()), np.ndarray)
        self.assertIs(ExampleIUPACSequence("").gaps().dtype, np.dtype('bool'))
        npt.assert_equal(ExampleIUPACSequence("ABCXBZYABC").gaps(),
                         np.zeros(10).astype(bool))

        npt.assert_equal(ExampleIUPACSequence(".-.-.").gaps(),
                         np.ones(5).astype(bool))

        npt.assert_equal(ExampleIUPACSequence("A.B-C.X-Y.").gaps(),
                         np.array([0, 1] * 5, dtype=bool))

        npt.assert_equal(ExampleIUPACSequence("AB.AC.XY-").gaps(),
                         np.array([0, 0, 1] * 3, dtype=bool))

        npt.assert_equal(ExampleIUPACSequence("A.BC.-").gaps(),
                         np.array([0, 1, 0, 0, 1, 1], dtype=bool))

    def test_has_gaps(self):
        self.assertIs(type(ExampleIUPACSequence("").has_gaps()), bool)
        self.assertIs(type(ExampleIUPACSequence("-").has_gaps()), bool)

        self.assertFalse(ExampleIUPACSequence("").has_gaps())
        self.assertFalse(ExampleIUPACSequence("ABCXYZ").has_gaps())

        self.assertTrue(ExampleIUPACSequence("-").has_gaps())
        self.assertTrue(ExampleIUPACSequence("ABCXYZ-").has_gaps())

    def test_degenerates(self):
        self.assertIs(type(ExampleIUPACSequence("").degenerates()), np.ndarray)
        self.assertIs(ExampleIUPACSequence("").degenerates().dtype,
                      np.dtype('bool'))

        npt.assert_equal(ExampleIUPACSequence("ABCBC-.AB.").degenerates(),
                         np.zeros(10).astype(bool))

        npt.assert_equal(ExampleIUPACSequence("ZYZYZ").degenerates(),
                         np.ones(5).astype(bool))

        npt.assert_equal(ExampleIUPACSequence("AX.Y-ZBXCZ").degenerates(),
                         np.array([0, 1] * 5, dtype=bool))

        npt.assert_equal(ExampleIUPACSequence("ABXACY.-Z").degenerates(),
                         np.array([0, 0, 1] * 3, dtype=bool))

        npt.assert_equal(ExampleIUPACSequence("AZBCXY").degenerates(),
                         np.array([0, 1, 0, 0, 1, 1], dtype=bool))

    def test_has_degenerates(self):
        self.assertIs(type(ExampleIUPACSequence("").has_degenerates()), bool)
        self.assertIs(type(ExampleIUPACSequence("X").has_degenerates()), bool)

        self.assertFalse(ExampleIUPACSequence("").has_degenerates())
        self.assertFalse(ExampleIUPACSequence("A-.BC").has_degenerates())

        self.assertTrue(ExampleIUPACSequence("Z").has_degenerates())
        self.assertTrue(ExampleIUPACSequence("ABC.XYZ-").has_degenerates())

    def test_nondegenerates(self):
        self.assertIs(type(ExampleIUPACSequence("").nondegenerates()),
                      np.ndarray)
        self.assertIs(ExampleIUPACSequence("").nondegenerates().dtype,
                      np.dtype('bool'))

        npt.assert_equal(ExampleIUPACSequence("XYZYZ-.XY.").nondegenerates(),
                         np.zeros(10).astype(bool))

        npt.assert_equal(ExampleIUPACSequence("ABABA").nondegenerates(),
                         np.ones(5).astype(bool))

        npt.assert_equal(ExampleIUPACSequence("XA.B-AZCXA").nondegenerates(),
                         np.array([0, 1] * 5, dtype=bool))

        npt.assert_equal(ExampleIUPACSequence("XXAZZB.-C").nondegenerates(),
                         np.array([0, 0, 1] * 3, dtype=bool))

        npt.assert_equal(ExampleIUPACSequence("YB.-AC").nondegenerates(),
                         np.array([0, 1, 0, 0, 1, 1], dtype=bool))

    def test_has_nondegenerates(self):
        self.assertIs(type(ExampleIUPACSequence("").has_nondegenerates()),
                      bool)
        self.assertIs(type(ExampleIUPACSequence("A").has_nondegenerates()),
                      bool)

        self.assertFalse(ExampleIUPACSequence("").has_nondegenerates())
        self.assertFalse(ExampleIUPACSequence("X-.YZ").has_nondegenerates())

        self.assertTrue(ExampleIUPACSequence("C").has_nondegenerates())
        self.assertTrue(ExampleIUPACSequence(".XYZ-ABC").has_nondegenerates())

    def test_degap(self):
        kw = {
            'metadata': {
                'id': 'some_id',
                'description': 'some description',
            },
        }

        self.assertEqual(
            ExampleIUPACSequence("", positional_metadata={'qual': []},
                                 **kw).degap(),
            ExampleIUPACSequence("", positional_metadata={'qual': []},
                                 **kw))

        self.assertEqual(
            ExampleIUPACSequence(
                "ABCXYZ",
                positional_metadata={'qual': np.arange(6)},
                **kw).degap(),
            ExampleIUPACSequence(
                "ABCXYZ",
                positional_metadata={'qual': np.arange(6)},
                **kw))

        self.assertEqual(
            ExampleIUPACSequence(
                "ABC-XYZ",
                positional_metadata={'qual': np.arange(7)},
                **kw).degap(),
            ExampleIUPACSequence(
                "ABCXYZ",
                positional_metadata={'qual': [0, 1, 2, 4, 5, 6]},
                **kw))

        self.assertEqual(
            ExampleIUPACSequence(
                ".-ABC-XYZ.",
                positional_metadata={'qual': np.arange(10)},
                **kw).degap(),
            ExampleIUPACSequence(
                "ABCXYZ",
                positional_metadata={'qual': [2, 3, 4, 6, 7, 8]},
                **kw))

        self.assertEqual(
            ExampleIUPACSequence(
                "---.-.-.-.-.",
                positional_metadata={'quality': np.arange(12)},
                **kw).degap(),
            ExampleIUPACSequence(
                "",
                positional_metadata={'quality': np.array([], dtype=np.int64)},
                **kw))

    def test_expand_degenerates_no_degens(self):
        seq = ExampleIUPACSequence("ABCABCABC")
        self.assertEqual(list(seq.expand_degenerates()), [seq])

    def test_expand_degenerates_all_degens(self):
        exp = [ExampleIUPACSequence('ABA'), ExampleIUPACSequence('ABC'),
               ExampleIUPACSequence('ACA'), ExampleIUPACSequence('ACC'),
               ExampleIUPACSequence('BBA'), ExampleIUPACSequence('BBC'),
               ExampleIUPACSequence('BCA'), ExampleIUPACSequence('BCC')]
        # Sort based on sequence string, as order is not guaranteed.
        obs = sorted(ExampleIUPACSequence('XYZ').expand_degenerates(), key=str)
        self.assertEqual(obs, exp)

    def test_expand_degenerates_with_metadata(self):
        kw = {
            "metadata": {
                "id": "some_id",
                "description": "some description"
            },
            "positional_metadata": {
                "quality": np.arange(3),
            },
        }
        exp = [ExampleIUPACSequence('ABA', **kw),
               ExampleIUPACSequence('ABC', **kw),
               ExampleIUPACSequence('BBA', **kw),
               ExampleIUPACSequence('BBC', **kw)]
        obs = sorted(ExampleIUPACSequence('XBZ', **kw).expand_degenerates(),
                     key=str)
        self.assertEqual(obs, exp)

    def test_find_motifs_no_motif(self):
        seq = ExampleMotifsTester("ABCABCABC")
        with self.assertRaises(ValueError) as cm:
            seq.find_motifs("doesn't-exist")
        self.assertIn("doesn't-exist", str(cm.exception))

        seq = ExampleIUPACSequence("ABCABCABC")
        with self.assertRaises(ValueError) as cm:
            seq.find_motifs("doesn't-exist")
        self.assertIn("doesn't-exist", str(cm.exception))

    def test_find_motifs(self):
        seq = ExampleMotifsTester("ABC")
        self.assertEqual(seq.find_motifs("name1"), "ABC")
        self.assertEqual(seq.find_motifs("name2"), 3)

    def test_repr(self):
        # basic sanity checks for custom repr stats. more extensive testing is
        # performed on Sequence.__repr__

        # minimal
        obs = repr(ExampleIUPACSequence(''))
        self.assertEqual(obs.count('\n'), 7)
        self.assertTrue(obs.startswith('ExampleIUPACSequence'))
        self.assertIn('length: 0', obs)
        self.assertIn('has gaps: False', obs)
        self.assertIn('has degenerates: False', obs)
        self.assertIn('has non-degenerates: False', obs)
        self.assertTrue(obs.endswith('-'))

        # no metadata, mix of gaps, degenerates, and non-degenerates
        obs = repr(ExampleIUPACSequence('AY-B'))
        self.assertEqual(obs.count('\n'), 8)
        self.assertTrue(obs.startswith('ExampleIUPACSequence'))
        self.assertIn('length: 4', obs)
        self.assertIn('has gaps: True', obs)
        self.assertIn('has degenerates: True', obs)
        self.assertIn('has non-degenerates: True', obs)
        self.assertTrue(obs.endswith('0 AY-B'))

        # metadata and positional metadata of mixed types
        obs = repr(
            ExampleIUPACSequence(
                'ABCA',
                metadata={'foo': 42, u'bar': 33.33, None: True, False: {},
                          (1, 2): 3, 'acb' * 100: "'"},
                positional_metadata={'foo': range(4),
                                     42: ['a', 'b', [], 'c']}))
        self.assertEqual(obs.count('\n'), 18)
        self.assertTrue(obs.startswith('ExampleIUPACSequence'))
        self.assertIn('None: True', obs)
        self.assertIn('\'foo\': 42', obs)
        self.assertIn('42: <dtype: object>', obs)
        self.assertIn('\'foo\': <dtype: int64>', obs)
        self.assertIn('length: 4', obs)
        self.assertIn('has gaps: False', obs)
        self.assertIn('has degenerates: False', obs)
        self.assertIn('has non-degenerates: True', obs)
        self.assertTrue(obs.endswith('0 ABCA'))

        # sequence spanning > 5 lines
        obs = repr(ExampleIUPACSequence('A' * 301))
        self.assertEqual(obs.count('\n'), 12)
        self.assertTrue(obs.startswith('ExampleIUPACSequence'))
        self.assertIn('length: 301', obs)
        self.assertIn('has gaps: False', obs)
        self.assertIn('has degenerates: False', obs)
        self.assertIn('has non-degenerates: True', obs)
        self.assertIn('...', obs)
        self.assertTrue(obs.endswith('300 A'))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/skbio/sequence/tests/test_nucleotide_sequences.py b/skbio/sequence/tests/test_nucleotide_sequences.py
new file mode 100644
index 0000000..ce03b19
--- /dev/null
+++ b/skbio/sequence/tests/test_nucleotide_sequences.py
@@ -0,0 +1,474 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import unittest
+
+import six
+import numpy as np
+
+from skbio import DNA, RNA, Protein, GeneticCode
+from skbio.sequence._nucleotide_mixin import NucleotideMixin
+
+
+# This file contains tests for functionality of sequence types which implement
+# NucleotideMixin. Currently this means DNA and RNA. These types are so
+# similar that the testing logic can be shared and parameterized across
+# different test data.
+
+class TestNucelotideSequence(unittest.TestCase):
+    # NOTE(review): the class name is misspelled ("Nucelotide" should be
+    # "Nucleotide"). Left unchanged because unittest discovers test classes
+    # by the "Test" prefix, so the typo is cosmetic only.
+
+    def setUp(self):
+        # Callables that coerce a str into each alternate sequence
+        # representation accepted by the API (plain str, char vector,
+        # byte vector).
+        self.sequence_kinds = frozenset([
+            str,
+            lambda s: np.fromstring(s, dtype='|S1'),
+            lambda s: np.fromstring(s, dtype=np.uint8)])
+
+        # Full IUPAC alphabets (nondegenerate + degenerate + gap chars) and
+        # their (reverse) complements, used to parameterize DNA/RNA tests.
+        dna_str = 'ACGTMRWSYKVHDBN.-'
+        dna_comp_str = 'TGCAKYWSRMBDHVN.-'
+        dna_rev_comp_str = '-.NVHDBMRSWYKACGT'
+        rna_str = 'ACGUMRWSYKVHDBN.-'
+        rna_comp_str = 'UGCAKYWSRMBDHVN.-'
+        rna_rev_comp_str = '-.NVHDBMRSWYKACGU'
+        qual = tuple(range(len(dna_str)))
+
+        self.dna = (DNA, dna_str)
+        self.rna = (RNA, rna_str)
+
+        dna_comp = self.dna + (dna_comp_str,)
+        rna_comp = self.rna + (rna_comp_str,)
+
+        dna_comp_qual = dna_comp + (qual,)
+        rna_comp_qual = rna_comp + (qual,)
+        self.all_combos_comp_qual = (dna_comp_qual, rna_comp_qual)
+
+        dna_rev_comp = self.dna + (dna_rev_comp_str,)
+        rna_rev_comp = self.rna + (rna_rev_comp_str,)
+        self.all_combos_rev_comp = (dna_rev_comp, rna_rev_comp)
+
+        dna_rev_comp_qual = dna_rev_comp + (qual,)
+        rna_rev_comp_qual = rna_rev_comp + (qual,)
+        self.all_combos_rev_comp_qual = \
+            (dna_rev_comp_qual, rna_rev_comp_qual)
+
+    def test_instantiation_with_no_implementation(self):
+        # A NucleotideMixin subclass missing the abstract complement_map
+        # property must not be instantiable.
+        class NucleotideSequenceSubclassNoImplementation(NucleotideMixin):
+            pass
+
+        with self.assertRaises(TypeError) as cm:
+            NucleotideSequenceSubclassNoImplementation()
+
+        self.assertIn("abstract class", str(cm.exception))
+        self.assertIn("complement_map", str(cm.exception))
+
+    def test_nondegenerate_chars(self):
+        dna = (DNA, "ACGT")
+        rna = (RNA, "ACGU")
+        for constructor, nondegenerate in (dna, rna):
+            exp = set(nondegenerate)
+            # accessible both as instance and class attribute
+            self.assertEqual(constructor('').nondegenerate_chars, exp)
+            self.assertEqual(constructor.nondegenerate_chars, exp)
+
+    def test_degenerate_map(self):
+        dna_exp = (DNA, {
+            'B': set(['C', 'T', 'G']), 'D': set(['A', 'T', 'G']),
+            'H': set(['A', 'C', 'T']), 'K': set(['T', 'G']),
+            'M': set(['A', 'C']), 'N': set(['A', 'C', 'T', 'G']),
+            'S': set(['C', 'G']), 'R': set(['A', 'G']), 'W': set(['A', 'T']),
+            'V': set(['A', 'C', 'G']), 'Y': set(['C', 'T'])
+        })
+
+        rna_exp = (RNA, {
+            'B': set(['C', 'U', 'G']), 'D': set(['A', 'U', 'G']),
+            'H': set(['A', 'C', 'U']), 'K': set(['U', 'G']),
+            'M': set(['A', 'C']), 'N': set(['A', 'C', 'U', 'G']),
+            'S': set(['C', 'G']), 'R': set(['A', 'G']), 'W': set(['A', 'U']),
+            'V': set(['A', 'C', 'G']), 'Y': set(['C', 'U'])
+        })
+
+        for constructor, degenerate in (dna_exp, rna_exp):
+            self.assertEqual(constructor('').degenerate_map, degenerate)
+            self.assertEqual(constructor.degenerate_map, degenerate)
+
+    def test_complement_map(self):
+        dna_exp = (DNA, {
+            '-': '-', '.': '.', 'A': 'T', 'C': 'G', 'B': 'V', 'D': 'H',
+            'G': 'C', 'H': 'D', 'K': 'M', 'M': 'K', 'N': 'N', 'S': 'S',
+            'R': 'Y', 'T': 'A', 'W': 'W', 'V': 'B', 'Y': 'R'
+        })
+
+        rna_exp = (RNA, {
+            '-': '-', '.': '.', 'A': 'U', 'C': 'G', 'B': 'V', 'D': 'H',
+            'G': 'C', 'H': 'D', 'K': 'M', 'M': 'K', 'N': 'N', 'S': 'S',
+            'R': 'Y', 'U': 'A', 'W': 'W', 'V': 'B', 'Y': 'R'
+        })
+
+        for constructor, comp_map in (dna_exp, rna_exp):
+            self.assertEqual(constructor('').complement_map, comp_map)
+            self.assertEqual(constructor.complement_map, comp_map)
+
+            # immutable: mutating the returned mapping must not change the
+            # class-level map, and the attribute itself is read-only
+            constructor.complement_map['A'] = 'X'
+            constructor.complement_map['C'] = 'W'
+            self.assertEqual(constructor.complement_map, comp_map)
+            with self.assertRaises(AttributeError):
+                constructor('').complement_map = {'W': 'X'}
+
+    def test_translate_ncbi_table_id(self):
+        for seq in RNA('AAAUUUAUGCAU'), DNA('AAATTTATGCAT'):
+            # default
+            obs = seq.translate()
+            self.assertEqual(obs, Protein('KFMH'))
+
+            # table 9 translates AAA to N instead of K
+            obs = seq.translate(9)
+            self.assertEqual(obs, Protein('NFMH'))
+
+    def test_translate_genetic_code_object(self):
+        # degenerate code mapping every codon to M
+        gc = GeneticCode('M' * 64, '-' * 64)
+        for seq in RNA('AAAUUUAUGCAU'), DNA('AAATTTATGCAT'):
+            obs = seq.translate(gc)
+            self.assertEqual(obs, Protein('MMMM'))
+
+    def test_translate_passes_parameters_through(self):
+        exp = Protein('MW')
+        for seq in RNA('UAAAUUGUGGUAA'), DNA('TAAATTGTGGTAA'):
+            # mix of args and kwargs
+            obs = seq.translate(13, reading_frame=2, start='require',
+                                stop='require')
+            self.assertEqual(obs, exp)
+
+            # kwargs only
+            obs = seq.translate(genetic_code=13, reading_frame=2,
+                                start='require', stop='require')
+            self.assertEqual(obs, exp)
+
+            # args only
+            obs = seq.translate(13, 2, 'require', 'require')
+            self.assertEqual(obs, exp)
+
+    def test_translate_preserves_metadata(self):
+        metadata = {'foo': 'bar', 'baz': 42}
+        positional_metadata = {'foo': range(3)}
+        for seq in (RNA('AUG', metadata=metadata,
+                        positional_metadata=positional_metadata),
+                    DNA('ATG', metadata=metadata,
+                        positional_metadata=positional_metadata)):
+            obs = seq.translate()
+            # metadata retained, positional metadata dropped
+            self.assertEqual(obs,
+                             Protein('M', metadata={'foo': 'bar', 'baz': 42}))
+
+    def test_translate_invalid_id(self):
+        for seq in RNA('AUG'), DNA('ATG'):
+            with six.assertRaisesRegex(self, ValueError, 'table_id.*42'):
+                seq.translate(42)
+
+    def test_translate_six_frames_ncbi_table_id(self):
+        # rc = CAAUUU
+        for seq in RNA('AAAUUG'), DNA('AAATTG'):
+            # default
+            obs = list(seq.translate_six_frames())
+            self.assertEqual(obs, [Protein('KL'), Protein('N'), Protein('I'),
+                                   Protein('QF'), Protein('N'), Protein('I')])
+
+            obs = list(seq.translate_six_frames(9))
+            self.assertEqual(obs, [Protein('NL'), Protein('N'), Protein('I'),
+                                   Protein('QF'), Protein('N'), Protein('I')])
+
+    def test_translate_six_frames_genetic_code_object(self):
+        gc = GeneticCode('M' * 64, '-' * 64)
+        for seq in RNA('AAAUUG'), DNA('AAATTG'):
+            obs = list(seq.translate_six_frames(gc))
+            self.assertEqual(obs, [Protein('MM'), Protein('M'), Protein('M'),
+                                   Protein('MM'), Protein('M'), Protein('M')])
+
+    def test_translate_six_frames_passes_parameters_through(self):
+        for seq in RNA('UUUAUGUGGUGA'), DNA('TTTATGTGGTGA'):
+            # mix of args and kwargs
+            obs = next(seq.translate_six_frames(11, start='require',
+                                                stop='require'))
+            self.assertEqual(obs, Protein('MW'))
+
+            # kwargs only
+            obs = next(seq.translate_six_frames(genetic_code=11,
+                                                start='require',
+                                                stop='require'))
+            self.assertEqual(obs, Protein('MW'))
+
+            # args only
+            obs = next(seq.translate_six_frames(11, 'require', 'require'))
+            self.assertEqual(obs, Protein('MW'))
+
+    def test_translate_six_frames_preserves_metadata(self):
+        metadata = {'foo': 'bar', 'baz': 42}
+        positional_metadata = {'foo': range(3)}
+        for seq in (RNA('AUG', metadata=metadata,
+                        positional_metadata=positional_metadata),
+                    DNA('ATG', metadata=metadata,
+                        positional_metadata=positional_metadata)):
+            obs = list(seq.translate_six_frames())[:2]
+            # metadata retained, positional metadata dropped
+            self.assertEqual(
+                obs,
+                [Protein('M', metadata={'foo': 'bar', 'baz': 42}),
+                 Protein('', metadata={'foo': 'bar', 'baz': 42})])
+
+    def test_translate_six_frames_invalid_id(self):
+        for seq in RNA('AUG'), DNA('ATG'):
+            with six.assertRaisesRegex(self, ValueError, 'table_id.*42'):
+                seq.translate_six_frames(42)
+
+    def test_repr(self):
+        # basic sanity checks for custom repr stats. more extensive testing is
+        # performed on Sequence.__repr__
+
+        for seq in DNA(''), RNA(''):
+            obs = repr(seq)
+            # obtained from super()
+            self.assertIn('has gaps: False', obs)
+            # custom to NucleotideMixin (GC-content stat), not Protein --
+            # the original comment was a copy-paste from test_protein.py
+            self.assertIn('GC-content: 0.00%', obs)
+
+        for seq in DNA('ACGT'), RNA('ACGU'):
+            obs = repr(seq)
+            self.assertIn('has gaps: False', obs)
+            self.assertIn('GC-content: 50.00%', obs)
+
+        for seq in DNA('CST'), RNA('CSU'):
+            obs = repr(seq)
+            self.assertIn('has gaps: False', obs)
+            self.assertIn('GC-content: 66.67%', obs)
+
+        for seq in DNA('GCSSCG'), RNA('GCSSCG'):
+            obs = repr(seq)
+            self.assertIn('has gaps: False', obs)
+            self.assertIn('GC-content: 100.00%', obs)
+
+        for seq in DNA('-GCSSCG.'), RNA('-GCSSCG.'):
+            obs = repr(seq)
+            self.assertIn('has gaps: True', obs)
+            self.assertIn('GC-content: 100.00%', obs)
+
+    def test_complement_without_reverse_empty(self):
+        for constructor in (DNA, RNA):
+            # without optional attributes
+            comp = constructor('').complement()
+            self.assertEqual(comp, constructor(''))
+
+            # with optional attributes
+            comp = constructor(
+                '',
+                metadata={'id': 'foo', 'description': 'bar'},
+                positional_metadata={'quality': []}).complement()
+            self.assertEqual(
+                comp,
+                constructor(
+                    '',
+                    metadata={'id': 'foo', 'description': 'bar'},
+                    positional_metadata={'quality': []}))
+
+    def test_complement_without_reverse_non_empty(self):
+        for (constructor, seq_str, comp_str,
+             qual) in self.all_combos_comp_qual:
+            comp = constructor(seq_str).complement()
+            self.assertEqual(comp, constructor(comp_str))
+
+            comp = constructor(
+                seq_str,
+                metadata={'id': 'foo', 'description': 'bar'},
+                positional_metadata={'quality': qual}).complement()
+            self.assertEqual(
+                comp,
+                constructor(
+                    comp_str,
+                    metadata={'id': 'foo', 'description': 'bar'},
+                    positional_metadata={'quality': qual}))
+
+    def test_complement_with_reverse_empty(self):
+        for constructor in (DNA, RNA):
+            rc = constructor('').complement(reverse=True)
+            self.assertEqual(rc, constructor(''))
+
+            rc = constructor(
+                '',
+                metadata={'id': 'foo', 'description': 'bar'},
+                positional_metadata={'quality': []}).complement(reverse=True)
+            self.assertEqual(
+                rc,
+                constructor(
+                    '',
+                    metadata={'id': 'foo', 'description': 'bar'},
+                    positional_metadata={'quality': []}))
+
+    def test_complement_with_reverse_non_empty(self):
+        for (constructor, seq_str, rev_comp_str,
+             qual) in self.all_combos_rev_comp_qual:
+            rc = constructor(seq_str).complement(reverse=True)
+            self.assertEqual(rc, constructor(rev_comp_str))
+
+            rc = constructor(
+                seq_str,
+                metadata={'id': 'foo', 'description': 'bar'},
+                positional_metadata={
+                    'quality': qual}).complement(reverse=True)
+            # positional metadata (quality) must be reversed along with the
+            # sequence
+            self.assertEqual(
+                rc,
+                constructor(
+                    rev_comp_str,
+                    metadata={'id': 'foo', 'description': 'bar'},
+                    positional_metadata={'quality':
+                                         list(qual)[::-1]}))
+
+    def test_reverse_complement(self):
+        # light tests because this just calls
+        # NucleotideSequence.complement(reverse=True), which is tested more
+        # extensively
+        for (constructor, seq_str, rev_comp_str,
+             qual) in self.all_combos_rev_comp_qual:
+            rc = constructor(
+                seq_str,
+                metadata={'id': 'foo', 'description': 'bar'},
+                positional_metadata={'quality': qual}).reverse_complement()
+            self.assertEqual(
+                rc,
+                constructor(
+                    rev_comp_str,
+                    metadata={'id': 'foo', 'description': 'bar'},
+                    positional_metadata={'quality': list(qual)[::-1]}))
+
+    def test_is_reverse_complement_varied_types(self):
+        # counts iterations to guard against the loops silently doing nothing
+        tested = 0
+        for constructor, seq_str, rev_comp_str in self.all_combos_rev_comp:
+            seq_kinds = self.sequence_kinds.union(frozenset([constructor]))
+            for sequence in seq_kinds:
+                tested += 1
+                seq1 = constructor(seq_str)
+                seq2 = sequence(rev_comp_str)
+
+                self.assertTrue(seq1.is_reverse_complement(seq2))
+
+        self.assertEqual(tested, 8)
+
+    def test_is_reverse_complement_empty(self):
+        for constructor in (DNA, RNA):
+            seq1 = constructor('')
+            self.assertTrue(seq1.is_reverse_complement(seq1))
+
+            # optional attributes are ignored, only the sequence is compared
+            seq2 = constructor(
+                '',
+                metadata={'id': 'foo', 'description': 'bar'},
+                positional_metadata={'quality':
+                                     np.array([], dtype=np.int64)})
+            self.assertTrue(seq2.is_reverse_complement(seq2))
+            self.assertTrue(seq1.is_reverse_complement(seq2))
+            self.assertTrue(seq2.is_reverse_complement(seq1))
+
+    def test_is_reverse_complement_metadata_ignored(self):
+        for (constructor, seq_str, rev_comp_str,
+             qual) in self.all_combos_rev_comp_qual:
+            seq1 = constructor(seq_str)
+            seq2 = constructor(
+                rev_comp_str,
+                metadata={'id': 'foo', 'description': 'bar'},
+                positional_metadata={'quality': qual})
+
+            self.assertFalse(seq1.is_reverse_complement(seq1))
+            self.assertFalse(seq2.is_reverse_complement(seq2))
+
+            self.assertTrue(seq1.is_reverse_complement(seq2))
+            self.assertTrue(seq2.is_reverse_complement(seq1))
+
+    def test_is_reverse_complement_non_reverse_complements(self):
+        for constructor in (DNA, RNA):
+            # same length
+            seq1 = constructor('ACAG')
+            seq2 = constructor('AAAA')
+
+            self.assertFalse(seq1.is_reverse_complement(seq1))
+            self.assertFalse(seq2.is_reverse_complement(seq2))
+
+            self.assertFalse(seq1.is_reverse_complement(seq2))
+            self.assertFalse(seq2.is_reverse_complement(seq1))
+
+            # different length
+            seq1 = constructor('ACAG')
+            seq2 = constructor('AAAAA')
+
+            self.assertFalse(seq1.is_reverse_complement(seq1))
+            self.assertFalse(seq2.is_reverse_complement(seq2))
+
+            self.assertFalse(seq1.is_reverse_complement(seq2))
+            self.assertFalse(seq2.is_reverse_complement(seq1))
+
+    def test_is_reverse_complement_type_mismatch(self):
+        # comparing against a subclass instance is a type error, not False
+        for Class in (DNA, RNA):
+            class Subclass(Class):
+                pass
+            seq1 = Class('ABC')
+            seq2 = Subclass('ABC')
+
+            with self.assertRaises(TypeError):
+                seq1.is_reverse_complement(seq2)
+
+    def test_motif_purine_run(self):
+        dna = (DNA, "AARC--TCRG", "AA-RC--TCR-G")
+        rna = (RNA, "AARC--UCRG", "AA-RC--UCR-G")
+        all_sets = (dna, rna)
+
+        for constructor, run1, run2 in all_sets:
+            seq = constructor("")
+            self.assertEqual(list(seq.find_motifs("purine-run")), [])
+
+            seq = constructor(run1)
+            self.assertEqual(list(seq.find_motifs("purine-run")),
+                             [slice(0, 3), slice(8, 10)])
+
+            seq = constructor(run2)
+            self.assertEqual(list(seq.find_motifs("purine-run", min_length=3,
+                                                  ignore=seq.gaps())),
+                             [slice(0, 4)])
+
+    def test_motif_pyrimidine_run(self):
+        dna = (DNA, "AARC--TCRA", "AA-RC--TCR-A")
+        rna = (RNA, "AARC--UCRG", "AA-RC--UCR-G")
+        all_sets = (dna, rna)
+
+        for constructor, run1, run2 in all_sets:
+            seq = constructor("")
+            self.assertEqual(list(seq.find_motifs("pyrimidine-run")), [])
+
+            seq = constructor(run1)
+            self.assertEqual(list(seq.find_motifs("pyrimidine-run")),
+                             [slice(3, 4), slice(6, 8)])
+
+            seq = constructor(run2)
+            self.assertEqual(list(seq.find_motifs("pyrimidine-run",
+                                                  min_length=3,
+                                                  ignore=seq.gaps())),
+                             [slice(4, 9)])
+
+    def test_gc_frequency_and_gc_content(self):
+        # each tuple is (sequence, absolute GC count, relative GC ratio);
+        # S (strong) counts toward GC, gaps are excluded from the denominator
+        universal_sets = (('', 0, 0.0), ('ADDDH', 0, 0.0), ('ACGA', 2, 0.5),
+                          ('ACGS', 3, 0.75), ('AAAAAAAG', 1, 0.125),
+                          ('CCC', 3, 1.0), ('GGG', 3, 1.0), ('SSS', 3, 1.0),
+                          ('CGS', 3, 1.0), ('----....', 0, 0.0),
+                          ('G--..', 1, 1.0), ('ACGA', 2, 0.5))
+        dna = (DNA, universal_sets + (('ATMRWYKVHDBN.-', 0, 0.0),))
+        rna = (RNA, universal_sets + (('AUMRWYKVHDBN.-', 0, 0.0),))
+        for constructor, current_set in (dna, rna):
+            for seq_str, count, ratio in current_set:
+                seq = constructor(seq_str)
+                self.assertEqual(count, seq.gc_frequency())
+                self.assertEqual(count, seq.gc_frequency(relative=False))
+                self.assertEqual(ratio, seq.gc_frequency(relative=True))
+                self.assertEqual(ratio, seq.gc_content())
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/skbio/sequence/tests/test_protein.py b/skbio/sequence/tests/test_protein.py
new file mode 100644
index 0000000..87c3159
--- /dev/null
+++ b/skbio/sequence/tests/test_protein.py
@@ -0,0 +1,124 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+import unittest
+
+import numpy as np
+import numpy.testing as npt
+
+from skbio import Protein
+
+
+class TestProtein(unittest.TestCase):
+    # Tests for the Protein grammar: alphabet and degenerate-character
+    # definitions, stop characters, stop detection, motif finding, and the
+    # custom repr statistics.
+
+    def test_alphabet(self):
+        expected = set("ACDEFGHIKLMNPQRSTVWYBZX-.*")
+        self.assertIs(type(Protein.alphabet), set)
+        self.assertEqual(Protein.alphabet, expected)
+
+        # mutating the returned set must not affect the class-level alphabet
+        Protein.alphabet.add("JO")
+        self.assertEqual(Protein.alphabet, expected)
+        self.assertEqual(Protein('').alphabet, expected)
+
+        # the attribute itself is read-only
+        with self.assertRaises(AttributeError):
+            Protein('').alphabet = set("ABCD")
+
+    def test_nondegenerate_chars(self):
+        exp = set("ACDEFGHIKLMNPQRSTVWY")
+        self.assertEqual(Protein("").nondegenerate_chars, exp)
+        self.assertEqual(Protein.nondegenerate_chars, exp)
+
+    def test_degenerate_map(self):
+        exp = {
+            'B': set(['D', 'N']), 'Z': set(['E', 'Q']),
+            'X': set(['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M',
+                      'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y'])
+        }
+        self.assertEqual(Protein("").degenerate_map, exp)
+        self.assertEqual(Protein.degenerate_map, exp)
+
+    def test_stop_chars(self):
+        expected = set('*')
+        self.assertIs(type(Protein.stop_chars), set)
+        self.assertEqual(Protein.stop_chars, expected)
+
+        # mutating the returned set must not affect the class-level set
+        Protein.stop_chars.add("JO")
+        self.assertEqual(Protein.stop_chars, expected)
+        self.assertEqual(Protein('').stop_chars, expected)
+
+        with self.assertRaises(AttributeError):
+            Protein('').stop_chars = set("^&")
+
+    def test_stops(self):
+        # stops() returns a boolean vector marking stop-character positions
+        npt.assert_array_equal(Protein('').stops(), np.array([]))
+
+        npt.assert_array_equal(Protein('P').stops(), np.array([False]))
+
+        npt.assert_array_equal(Protein('PAW').stops(),
+                               np.array([False, False, False]))
+
+        npt.assert_array_equal(Protein('PAW*').stops(),
+                               np.array([False, False, False, True]))
+
+        npt.assert_array_equal(Protein('P*W*').stops(),
+                               np.array([False, True, False, True]))
+
+        npt.assert_array_equal(Protein('****').stops(),
+                               np.array([True, True, True, True]))
+
+        # degenerates and gaps are not stops
+        npt.assert_array_equal(Protein('XZB-.').stops(),
+                               np.array([False, False, False, False, False]))
+
+    def test_has_stops(self):
+        self.assertFalse(Protein('').has_stops())
+        self.assertFalse(Protein('P').has_stops())
+        self.assertFalse(Protein('PAW').has_stops())
+        self.assertTrue(Protein('PAW*').has_stops())
+        self.assertTrue(Protein('P*W*').has_stops())
+        self.assertTrue(Protein('****').has_stops())
+        self.assertFalse(Protein('XZB-.').has_stops())
+
+    def test_motif_n_glycosylation(self):
+        seq = Protein("ACDFFACGNPSL")
+        self.assertEqual(list(seq.find_motifs("N-glycosylation")), [])
+
+        seq = Protein("ACDFNFTACGNPSL")
+        self.assertEqual(list(seq.find_motifs("N-glycosylation")),
+                         [slice(4, 8)])
+
+        # gap positions can be skipped via ignore=
+        seq = Protein("AC-DFN-FTACGNPSL")
+        self.assertEqual(list(seq.find_motifs("N-glycosylation",
+                                              ignore=seq.gaps())),
+                         [slice(5, 10)])
+
+    def test_repr(self):
+        # basic sanity checks for custom repr stats. more extensive testing is
+        # performed on Sequence.__repr__
+
+        obs = repr(Protein(''))
+        # obtained from super()
+        self.assertIn('has gaps: False', obs)
+        # custom to Protein
+        self.assertIn('has stops: False', obs)
+
+        obs = repr(Protein('PAW'))
+        self.assertIn('has gaps: False', obs)
+        self.assertIn('has stops: False', obs)
+
+        obs = repr(Protein('PA*W-'))
+        self.assertIn('has gaps: True', obs)
+        self.assertIn('has stops: True', obs)
+
+        obs = repr(Protein('*****'))
+        self.assertIn('has gaps: False', obs)
+        self.assertIn('has stops: True', obs)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/skbio/sequence/tests/test_sequence.py b/skbio/sequence/tests/test_sequence.py
index 8d3799d..3c5578e 100644
--- a/skbio/sequence/tests/test_sequence.py
+++ b/skbio/sequence/tests/test_sequence.py
@@ -7,1412 +7,2620 @@
 # ----------------------------------------------------------------------------
 
 from __future__ import absolute_import, division, print_function
-from future.standard_library import hooks
+import six
+from six.moves import zip_longest
 
-from re import compile as re_compile
-from collections import Counter, defaultdict
+import copy
+import re
+from types import GeneratorType
+from collections import Counter, defaultdict, Hashable
 from unittest import TestCase, main
 
 import numpy as np
 import numpy.testing as npt
+import pandas as pd
 
-from skbio import (
-    BiologicalSequence, NucleotideSequence, DNASequence, RNASequence,
-    ProteinSequence)
-from skbio.sequence import BiologicalSequenceError
+from skbio import Sequence
+from skbio.util import assert_data_frame_almost_equal
+from skbio.sequence._sequence import (_single_index_to_slice, _is_single_index,
+                                      _as_slice_if_single_index)
 
-with hooks():
-    from itertools import zip_longest
 
+class SequenceSubclass(Sequence):
+    """Used for testing purposes."""
+    # Defined at module level so tests have a distinct Sequence-derived type
+    # available (presumably for type-comparison/mismatch cases -- its uses
+    # are outside this hunk).
+    pass
 
-class BiologicalSequenceTests(TestCase):
 
+class TestSequence(TestCase):
     def setUp(self):
-        self.b1 = BiologicalSequence('GATTACA', quality=range(7))
-        self.b2 = BiologicalSequence(
-            'ACCGGTACC', id="test-seq-2",
-            description="A test sequence")
-        self.b3 = BiologicalSequence(
-            'GREG', id="test-seq-3", description="A protein sequence")
-        self.b4 = BiologicalSequence(
-            'PRTEIN', id="test-seq-4")
-        self.b5 = BiologicalSequence(
-            'LLPRTEIN', description="some description")
-        self.b6 = BiologicalSequence('ACGTACGTACGT')
-        self.b7 = BiologicalSequence('..--..', quality=range(6))
-        self.b8 = BiologicalSequence('HE..--..LLO', id='hello',
-                                     description='gapped hello',
-                                     quality=range(11))
-
-    def test_init_varied_input(self):
-        # init as string
-        b = BiologicalSequence('ACCGGXZY')
-        self.assertEqual(str(b), 'ACCGGXZY')
-        self.assertEqual(b.id, "")
-        self.assertEqual(b.description, "")
-
-        # init as string with optional values
-        b = BiologicalSequence(
-            'ACCGGXZY', 'test-seq-1', 'The first test sequence')
-        self.assertEqual(str(b), 'ACCGGXZY')
-        self.assertEqual(b.id, "test-seq-1")
-        self.assertEqual(b.description, "The first test sequence")
-
-        # test init as a different string
-        b = BiologicalSequence('WRRTY')
-        self.assertEqual(str(b), 'WRRTY')
-
-        # init as list
-        b = BiologicalSequence(list('ACCGGXZY'))
-        self.assertEqual(str(b), 'ACCGGXZY')
-        self.assertEqual(b.id, "")
-        self.assertEqual(b.description, "")
-
-        # init as tuple
-        b = BiologicalSequence(tuple('ACCGGXZY'))
-        self.assertEqual(str(b), 'ACCGGXZY')
-        self.assertEqual(b.id, "")
-        self.assertEqual(b.description, "")
-
-    def test_init_with_validation(self):
-        self.assertRaises(BiologicalSequenceError, BiologicalSequence, "ACC",
-                          validate=True)
-        try:
-            # no error raised when only allow characters are passed
-            BiologicalSequence("..--..", validate=True)
-        except BiologicalSequenceError:
-            self.assertTrue(False)
-
-    def test_init_with_invalid_quality(self):
-        # invalid dtype
-        with self.assertRaises(TypeError):
-            BiologicalSequence('ACGT', quality=[2, 3, 4.1, 5])
+        # Callables that coerce a str into each alternate input type accepted
+        # by the Sequence API.
+        self.sequence_kinds = frozenset([
+            str, Sequence, lambda s: np.fromstring(s, dtype='|S1'),
+            lambda s: np.fromstring(s, dtype=np.uint8)])
+
+        def empty_generator():
+            # NOTE(review): raising StopIteration inside a generator body is
+            # rejected by PEP 479 (it becomes RuntimeError on Python 3.7+);
+            # the forward-compatible spelling is a bare `return` before the
+            # unreachable `yield`.
+            raise StopIteration()
+            yield
+
+        # Assorted "empty" index objects used to exercise __getitem__.
+        self.getitem_empty_indices = [
+            [],
+            (),
+            {},
+            empty_generator(),
+            # ndarray of implicit float dtype
+            np.array([]),
+            np.array([], dtype=int)]
+
+    def test_init_default_parameters(self):
+        # With no metadata supplied, the sequence stores its characters and
+        # exposes empty metadata / empty positional-metadata frame.
+        seq = Sequence('.ABC123xyz-')
+
+        npt.assert_equal(seq.values, np.array('.ABC123xyz-', dtype='c'))
+        self.assertEqual('.ABC123xyz-', str(seq))
+        self.assertFalse(seq.has_metadata())
+        self.assertEqual(seq.metadata, {})
+        self.assertFalse(seq.has_positional_metadata())
+        assert_data_frame_almost_equal(seq.positional_metadata,
+                                       pd.DataFrame(index=np.arange(11)))
+
+    def test_init_nondefault_parameters(self):
+        # Metadata and positional metadata passed at construction are stored
+        # and reported as present.
+        seq = Sequence('.ABC123xyz-',
+                       metadata={'id': 'foo', 'description': 'bar baz'},
+                       positional_metadata={'quality': range(11)})
+
+        npt.assert_equal(seq.values, np.array('.ABC123xyz-', dtype='c'))
+        self.assertEqual('.ABC123xyz-', str(seq))
+
+        self.assertTrue(seq.has_metadata())
+        self.assertEqual(seq.metadata, {'id': 'foo', 'description': 'bar baz'})
+
+        self.assertTrue(seq.has_positional_metadata())
+        assert_data_frame_almost_equal(
+            seq.positional_metadata,
+            pd.DataFrame({'quality': range(11)}, index=np.arange(11)))
+
+    def test_init_handles_missing_metadata_efficiently(self):
+        seq = Sequence('ACGT')
+
+        # metadata attributes should be None and not initialized to a "missing"
+        # representation
+        self.assertIsNone(seq._metadata)
+        self.assertIsNone(seq._positional_metadata)
+
+        # initializing from an existing Sequence object should handle metadata
+        # attributes efficiently on both objects
+        new_seq = Sequence(seq)
+        self.assertIsNone(seq._metadata)
+        self.assertIsNone(seq._positional_metadata)
+        self.assertIsNone(new_seq._metadata)
+        self.assertIsNone(new_seq._positional_metadata)
+
+        # the lazy attributes must still report "no metadata" via the public
+        # API without being materialized
+        self.assertFalse(seq.has_metadata())
+        self.assertFalse(seq.has_positional_metadata())
+        self.assertFalse(new_seq.has_metadata())
+        self.assertFalse(new_seq.has_positional_metadata())
+
+    def test_init_empty_sequence(self):
+        # Test constructing an empty sequence using each supported input type.
+        for s in (b'',  # bytes
+                  u'',  # unicode
+                  np.array('', dtype='c'),  # char vector
+                  np.fromstring('', dtype=np.uint8),  # byte vec
+                  Sequence('')):  # another Sequence object
+            seq = Sequence(s)
+
+            # values are always normalized to a |S1 (single-char) ndarray
+            self.assertIsInstance(seq.values, np.ndarray)
+            self.assertEqual(seq.values.dtype, '|S1')
+            self.assertEqual(seq.values.shape, (0, ))
+            npt.assert_equal(seq.values, np.array('', dtype='c'))
+            self.assertEqual(str(seq), '')
+            self.assertEqual(len(seq), 0)
+
+            self.assertFalse(seq.has_metadata())
+            self.assertEqual(seq.metadata, {})
+
+            self.assertFalse(seq.has_positional_metadata())
+            assert_data_frame_almost_equal(seq.positional_metadata,
+                                           pd.DataFrame(index=np.arange(0)))
+
+    def test_init_single_character_sequence(self):
+        for s in (b'A',
+                  u'A',
+                  np.array('A', dtype='c'),
+                  np.fromstring('A', dtype=np.uint8),
+                  Sequence('A')):
+            seq = Sequence(s)
+
+            self.assertIsInstance(seq.values, np.ndarray)
+            self.assertEqual(seq.values.dtype, '|S1')
+            self.assertEqual(seq.values.shape, (1,))
+            npt.assert_equal(seq.values, np.array('A', dtype='c'))
+            self.assertEqual(str(seq), 'A')
+            self.assertEqual(len(seq), 1)
+
+            self.assertFalse(seq.has_metadata())
+            self.assertEqual(seq.metadata, {})
+
+            self.assertFalse(seq.has_positional_metadata())
+            assert_data_frame_almost_equal(seq.positional_metadata,
+                                           pd.DataFrame(index=np.arange(1)))
+
+    def test_init_multiple_character_sequence(self):
+        for s in (b'.ABC\t123  xyz-',
+                  u'.ABC\t123  xyz-',
+                  np.array('.ABC\t123  xyz-', dtype='c'),
+                  np.fromstring('.ABC\t123  xyz-', dtype=np.uint8),
+                  Sequence('.ABC\t123  xyz-')):
+            seq = Sequence(s)
+
+            self.assertIsInstance(seq.values, np.ndarray)
+            self.assertEqual(seq.values.dtype, '|S1')
+            self.assertEqual(seq.values.shape, (14,))
+            npt.assert_equal(seq.values,
+                             np.array('.ABC\t123  xyz-', dtype='c'))
+            self.assertEqual(str(seq), '.ABC\t123  xyz-')
+            self.assertEqual(len(seq), 14)
+
+            self.assertFalse(seq.has_metadata())
+            self.assertEqual(seq.metadata, {})
+
+            self.assertFalse(seq.has_positional_metadata())
+            assert_data_frame_almost_equal(seq.positional_metadata,
+                                           pd.DataFrame(index=np.arange(14)))
+
+    def test_init_from_sequence_object(self):
+        # We're testing this in its simplest form in other tests. This test
+        # exercises more complicated cases of building a sequence from another
+        # sequence.
+
+        # just the sequence, no other metadata
+        seq = Sequence('ACGT')
+        self.assertEqual(Sequence(seq), seq)
+
+        # sequence with metadata should have everything propagated
+        seq = Sequence('ACGT',
+                       metadata={'id': 'foo', 'description': 'bar baz'},
+                       positional_metadata={'quality': range(4)})
+        self.assertEqual(Sequence(seq), seq)
+
+        # should be able to override metadata
+        self.assertEqual(
+            Sequence(seq, metadata={'id': 'abc', 'description': '123'},
+                     positional_metadata={'quality': [42] * 4}),
+            Sequence('ACGT', metadata={'id': 'abc', 'description': '123'},
+                     positional_metadata={'quality': [42] * 4}))
+
+        # subclasses work too
+        seq = SequenceSubclass('ACGT',
+                               metadata={'id': 'foo',
+                                         'description': 'bar baz'},
+                               positional_metadata={'quality': range(4)})
+        self.assertEqual(
+            Sequence(seq),
+            Sequence('ACGT', metadata={'id': 'foo', 'description': 'bar baz'},
+                     positional_metadata={'quality': range(4)}))
 
-        # wrong number of dimensions (2-D)
-        with self.assertRaisesRegexp(BiologicalSequenceError, '1-D'):
-            BiologicalSequence('ACGT', quality=[[2, 3], [4, 5]])
+    def test_init_from_contiguous_sequence_bytes_view(self):
+        bytes = np.array([65, 42, 66, 42, 65], dtype=np.uint8)
+        view = bytes[:3]
+        seq = Sequence(view)
 
-        # wrong number of elements
-        with self.assertRaisesRegexp(BiologicalSequenceError, '\(3\).*\(4\)'):
-            BiologicalSequence('ACGT', quality=[2, 3, 4])
+        # sequence should be what we'd expect
+        self.assertEqual(seq, Sequence('A*B'))
 
-        # negatives
-        with self.assertRaisesRegexp(BiologicalSequenceError,
-                                     'quality scores.*greater than.*zero'):
-            BiologicalSequence('ACGT', quality=[2, 3, -1, 4])
+        # we shouldn't own the memory because no copy should have been made
+        self.assertFalse(seq._owns_bytes)
 
-    def test_contains(self):
-        self.assertTrue('G' in self.b1)
-        self.assertFalse('g' in self.b1)
+        # can't mutate view because it isn't writeable anymore
+        with self.assertRaises(ValueError):
+            view[1] = 100
 
-    def test_eq_and_ne(self):
-        self.assertTrue(self.b1 == self.b1)
-        self.assertTrue(self.b2 == self.b2)
-        self.assertTrue(self.b3 == self.b3)
+        # sequence shouldn't have changed
+        self.assertEqual(seq, Sequence('A*B'))
 
-        self.assertTrue(self.b1 != self.b3)
-        self.assertTrue(self.b1 != self.b2)
-        self.assertTrue(self.b2 != self.b3)
+        # mutate bytes (*not* the view)
+        bytes[0] = 99
 
-        # identical sequences of the same type are equal, even if they have
-        # different ids, descriptions, and/or quality
-        self.assertTrue(
-            BiologicalSequence('ACGT') == BiologicalSequence('ACGT'))
-        self.assertTrue(
-            BiologicalSequence('ACGT', id='a') ==
-            BiologicalSequence('ACGT', id='b'))
-        self.assertTrue(
-            BiologicalSequence('ACGT', description='c') ==
-            BiologicalSequence('ACGT', description='d'))
-        self.assertTrue(
-            BiologicalSequence('ACGT', id='a', description='c') ==
-            BiologicalSequence('ACGT', id='b', description='d'))
-        self.assertTrue(
-            BiologicalSequence('ACGT', id='a', description='c',
-                               quality=[1, 2, 3, 4]) ==
-            BiologicalSequence('ACGT', id='b', description='d',
-                               quality=[5, 6, 7, 8]))
-
-        # different type causes sequences to not be equal
-        self.assertFalse(
-            BiologicalSequence('ACGT') == NucleotideSequence('ACGT'))
-
-    def test_getitem(self):
-        # use equals method to ensure that id, description, and sliced
-        # quality are correctly propagated to the resulting sequence
-        self.assertTrue(self.b1[0].equals(
-            BiologicalSequence('G', quality=(0,))))
-
-        self.assertTrue(self.b1[:].equals(
-            BiologicalSequence('GATTACA', quality=range(7))))
-
-        self.assertTrue(self.b1[::-1].equals(
-            BiologicalSequence('ACATTAG', quality=range(7)[::-1])))
-
-        # test a sequence without quality scores
-        b = BiologicalSequence('ACGT', id='foo', description='bar')
-        self.assertTrue(b[2:].equals(
-            BiologicalSequence('GT', id='foo', description='bar')))
-        self.assertTrue(b[2].equals(
-            BiologicalSequence('G', id='foo', description='bar')))
-
-    def test_getitem_indices(self):
-        # no ordering, repeated items
-        self.assertTrue(self.b1[[3, 5, 4, 0, 5, 0]].equals(
-            BiologicalSequence('TCAGCG', quality=(3, 5, 4, 0, 5, 0))))
-
-        # empty list
-        self.assertTrue(self.b1[[]].equals(BiologicalSequence('', quality=())))
-
-        # empty tuple
-        self.assertTrue(self.b1[()].equals(BiologicalSequence('', quality=())))
-
-        # single item
-        self.assertTrue(
-            self.b1[[2]].equals(BiologicalSequence('T', quality=(2,))))
+        # Sequence changed because we are only able to make the view read-only,
+        # not its source (bytes). This is somewhat inconsistent behavior that
+        # is (to the best of our knowledge) outside our control.
+        self.assertEqual(seq, Sequence('c*B'))
 
-        # negatives
-        self.assertTrue(self.b1[[2, -2, 4]].equals(
-            BiologicalSequence('TCA', quality=(2, 5, 4))))
+    def test_init_from_noncontiguous_sequence_bytes_view(self):
+        bytes = np.array([65, 42, 66, 42, 65], dtype=np.uint8)
+        view = bytes[::2]
+        seq = Sequence(view)
 
-        # tuple
-        self.assertTrue(self.b1[1, 2, 3].equals(
-            BiologicalSequence('ATT', quality=(1, 2, 3))))
-        self.assertTrue(self.b1[(1, 2, 3)].equals(
-            BiologicalSequence('ATT', quality=(1, 2, 3))))
+        # sequence should be what we'd expect
+        self.assertEqual(seq, Sequence('ABA'))
 
-        # test a sequence without quality scores
-        self.assertTrue(self.b2[5, 4, 1].equals(
-            BiologicalSequence('TGC', id='test-seq-2',
-                               description='A test sequence')))
+        # we should own the memory because a copy should have been made
+        self.assertTrue(seq._owns_bytes)
 
-    def test_getitem_wrong_type(self):
-        with self.assertRaises(TypeError):
-            self.b1['1']
+        # mutate bytes and its view
+        bytes[0] = 99
+        view[1] = 100
 
-    def test_getitem_out_of_range(self):
-        # seq with quality
-        with self.assertRaises(IndexError):
-            self.b1[42]
-        with self.assertRaises(IndexError):
-            self.b1[[1, 0, 23, 3]]
+        # sequence shouldn't have changed
+        self.assertEqual(seq, Sequence('ABA'))
 
-        # seq without quality
-        with self.assertRaises(IndexError):
-            self.b2[43]
-        with self.assertRaises(IndexError):
-            self.b2[[2, 3, 22, 1]]
+    def test_init_no_copy_of_sequence(self):
+        bytes = np.array([65, 66, 65], dtype=np.uint8)
+        seq = Sequence(bytes)
 
-    def test_hash(self):
-        self.assertTrue(isinstance(hash(self.b1), int))
+        # should share the same memory
+        self.assertIs(seq._bytes, bytes)
 
-    def test_iter(self):
-        b1_iter = iter(self.b1)
-        for actual, expected in zip(b1_iter, "GATTACA"):
-            self.assertEqual(actual, expected)
+        # shouldn't be able to mutate the Sequence object's internals by
+        # mutating the shared memory
+        with self.assertRaises(ValueError):
+            bytes[1] = 42
 
-        self.assertRaises(StopIteration, lambda: next(b1_iter))
+    def test_init_empty_metadata(self):
+        for empty in None, {}:
+            seq = Sequence('', metadata=empty)
 
-    def _compare_k_words_results(self, observed, expected):
-        for obs, exp in zip_longest(observed, expected, fillvalue=None):
-            # use equals to compare quality, id, description, sequence, and
-            # type
-            self.assertTrue(obs.equals(exp))
+            self.assertFalse(seq.has_metadata())
+            self.assertEqual(seq.metadata, {})
 
-    def test_k_words_overlapping_true(self):
-        expected = [
-            BiologicalSequence('G', quality=[0]),
-            BiologicalSequence('A', quality=[1]),
-            BiologicalSequence('T', quality=[2]),
-            BiologicalSequence('T', quality=[3]),
-            BiologicalSequence('A', quality=[4]),
-            BiologicalSequence('C', quality=[5]),
-            BiologicalSequence('A', quality=[6])
-        ]
-        self._compare_k_words_results(
-            self.b1.k_words(1, overlapping=True), expected)
+    def test_init_empty_metadata_key(self):
+        seq = Sequence('', metadata={'': ''})
 
-        expected = [
-            BiologicalSequence('GA', quality=[0, 1]),
-            BiologicalSequence('AT', quality=[1, 2]),
-            BiologicalSequence('TT', quality=[2, 3]),
-            BiologicalSequence('TA', quality=[3, 4]),
-            BiologicalSequence('AC', quality=[4, 5]),
-            BiologicalSequence('CA', quality=[5, 6])
-        ]
-        self._compare_k_words_results(
-            self.b1.k_words(2, overlapping=True), expected)
+        self.assertTrue(seq.has_metadata())
+        self.assertEqual(seq.metadata, {'': ''})
 
-        expected = [
-            BiologicalSequence('GAT', quality=[0, 1, 2]),
-            BiologicalSequence('ATT', quality=[1, 2, 3]),
-            BiologicalSequence('TTA', quality=[2, 3, 4]),
-            BiologicalSequence('TAC', quality=[3, 4, 5]),
-            BiologicalSequence('ACA', quality=[4, 5, 6])
-        ]
-        self._compare_k_words_results(
-            self.b1.k_words(3, overlapping=True), expected)
+    def test_init_empty_metadata_item(self):
+        seq = Sequence('', metadata={'foo': ''})
 
-        expected = [
-            BiologicalSequence('GATTACA', quality=[0, 1, 2, 3, 4, 5, 6])
-        ]
-        self._compare_k_words_results(
-            self.b1.k_words(7, overlapping=True), expected)
+        self.assertTrue(seq.has_metadata())
+        self.assertEqual(seq.metadata, {'foo': ''})
 
-        self.assertEqual(list(self.b1.k_words(8, overlapping=True)), [])
+    def test_init_single_character_metadata_item(self):
+        seq = Sequence('', metadata={'foo': 'z'})
 
-    def test_k_words_overlapping_false(self):
-        expected = [
-            BiologicalSequence('G', quality=[0]),
-            BiologicalSequence('A', quality=[1]),
-            BiologicalSequence('T', quality=[2]),
-            BiologicalSequence('T', quality=[3]),
-            BiologicalSequence('A', quality=[4]),
-            BiologicalSequence('C', quality=[5]),
-            BiologicalSequence('A', quality=[6])
-        ]
-        self._compare_k_words_results(
-            self.b1.k_words(1, overlapping=False), expected)
+        self.assertTrue(seq.has_metadata())
+        self.assertEqual(seq.metadata, {'foo': 'z'})
 
-        expected = [
-            BiologicalSequence('GA', quality=[0, 1]),
-            BiologicalSequence('TT', quality=[2, 3]),
-            BiologicalSequence('AC', quality=[4, 5])
-        ]
-        self._compare_k_words_results(
-            self.b1.k_words(2, overlapping=False), expected)
+    def test_init_multiple_character_metadata_item(self):
+        seq = Sequence('', metadata={'foo': '\nabc\tdef  G123'})
 
-        expected = [
-            BiologicalSequence('GAT', quality=[0, 1, 2]),
-            BiologicalSequence('TAC', quality=[3, 4, 5])
-        ]
-        self._compare_k_words_results(
-            self.b1.k_words(3, overlapping=False), expected)
+        self.assertTrue(seq.has_metadata())
+        self.assertEqual(seq.metadata, {'foo': '\nabc\tdef  G123'})
 
-        expected = [
-            BiologicalSequence('GATTACA', quality=[0, 1, 2, 3, 4, 5, 6])
-        ]
-        self._compare_k_words_results(
-            self.b1.k_words(7, overlapping=False), expected)
+    def test_init_metadata_multiple_keys(self):
+        seq = Sequence('', metadata={'foo': 'abc', 42: {'nested': 'metadata'}})
 
-        self.assertEqual(list(self.b1.k_words(8, overlapping=False)), [])
+        self.assertTrue(seq.has_metadata())
+        self.assertEqual(seq.metadata,
+                         {'foo': 'abc', 42: {'nested': 'metadata'}})
 
-    def test_k_words_invalid_k(self):
-        with self.assertRaises(ValueError):
-            list(self.b1.k_words(0))
+    def test_init_empty_positional_metadata(self):
+        # empty seq with missing/empty positional metadata
+        for empty in None, {}, pd.DataFrame():
+            seq = Sequence('', positional_metadata=empty)
 
-        with self.assertRaises(ValueError):
-            list(self.b1.k_words(-42))
+            self.assertFalse(seq.has_metadata())
+            self.assertEqual(seq.metadata, {})
 
-    def test_k_words_different_sequences(self):
-        expected = [
-            BiologicalSequence('HE.', quality=[0, 1, 2], id='hello',
-                               description='gapped hello'),
-            BiologicalSequence('.--', quality=[3, 4, 5], id='hello',
-                               description='gapped hello'),
-            BiologicalSequence('..L', quality=[6, 7, 8], id='hello',
-                               description='gapped hello')
-        ]
-        self._compare_k_words_results(
-            self.b8.k_words(3, overlapping=False), expected)
+            self.assertFalse(seq.has_positional_metadata())
+            assert_data_frame_almost_equal(seq.positional_metadata,
+                                           pd.DataFrame(index=np.arange(0)))
 
-        b = BiologicalSequence('')
-        self.assertEqual(list(b.k_words(3)), [])
+        # non-empty seq with missing positional metadata
+        seq = Sequence('xyz', positional_metadata=None)
 
-    def test_k_word_counts(self):
-        # overlapping = True
-        expected = Counter('GATTACA')
-        self.assertEqual(self.b1.k_word_counts(1, overlapping=True),
-                         expected)
-        expected = Counter(['GAT', 'ATT', 'TTA', 'TAC', 'ACA'])
-        self.assertEqual(self.b1.k_word_counts(3, overlapping=True),
-                         expected)
+        self.assertFalse(seq.has_metadata())
+        self.assertEqual(seq.metadata, {})
 
-        # overlapping = False
-        expected = Counter(['GAT', 'TAC'])
-        self.assertEqual(self.b1.k_word_counts(3, overlapping=False),
-                         expected)
-        expected = Counter(['GATTACA'])
-        self.assertEqual(self.b1.k_word_counts(7, overlapping=False),
-                         expected)
+        self.assertFalse(seq.has_positional_metadata())
+        assert_data_frame_almost_equal(seq.positional_metadata,
+                                       pd.DataFrame(index=np.arange(3)))
 
-    def test_k_word_frequencies(self):
-        # overlapping = True
-        expected = defaultdict(float)
-        expected['A'] = 3/7.
-        expected['C'] = 1/7.
-        expected['G'] = 1/7.
-        expected['T'] = 2/7.
-        self.assertEqual(self.b1.k_word_frequencies(1, overlapping=True),
-                         expected)
-        expected = defaultdict(float)
-        expected['GAT'] = 1/5.
-        expected['ATT'] = 1/5.
-        expected['TTA'] = 1/5.
-        expected['TAC'] = 1/5.
-        expected['ACA'] = 1/5.
-        self.assertEqual(self.b1.k_word_frequencies(3, overlapping=True),
-                         expected)
+    def test_init_empty_positional_metadata_item(self):
+        for item in ([], (), np.array([])):
+            seq = Sequence('', positional_metadata={'foo': item})
 
-        # overlapping = False
-        expected = defaultdict(float)
-        expected['GAT'] = 1/2.
-        expected['TAC'] = 1/2.
-        self.assertEqual(self.b1.k_word_frequencies(3, overlapping=False),
-                         expected)
-        expected = defaultdict(float)
-        expected['GATTACA'] = 1.0
-        self.assertEqual(self.b1.k_word_frequencies(7, overlapping=False),
-                         expected)
-        expected = defaultdict(float)
-        empty = BiologicalSequence('')
-        self.assertEqual(empty.k_word_frequencies(1, overlapping=False),
-                         expected)
+            self.assertFalse(seq.has_metadata())
+            self.assertEqual(seq.metadata, {})
 
-    def test_k_word_frequencies_floating_point_precision(self):
-        # Test that a sequence having no variation in k-words yields a
-        # frequency of exactly 1.0. Note that it is important to use
-        # self.assertEqual here instead of self.assertAlmostEqual because we
-        # want to test for exactly 1.0. A previous implementation of
-        # BiologicalSequence.k_word_frequencies added (1 / num_words) for each
-        # occurrence of a k-word to compute the frequencies (see
-        # https://github.com/biocore/scikit-bio/issues/801). In certain cases,
-        # this yielded a frequency slightly less than 1.0 due to roundoff
-        # error. The test case here uses a sequence with 10 characters that are
-        # all identical and computes k-word frequencies with k=1. This test
-        # case exposes the roundoff error present in the previous
-        # implementation because there are 10 k-words (which are all
-        # identical), so 1/10 added 10 times yields a number slightly less than
-        # 1.0. This occurs because 1/10 cannot be represented exactly as a
-        # floating point number.
-        seq = BiologicalSequence('AAAAAAAAAA')
-        self.assertEqual(seq.k_word_frequencies(1),
-                         defaultdict(float, {'A': 1.0}))
+            self.assertTrue(seq.has_positional_metadata())
+            assert_data_frame_almost_equal(
+                seq.positional_metadata,
+                pd.DataFrame({'foo': item}, index=np.arange(0)))
 
-    def test_len(self):
-        self.assertEqual(len(self.b1), 7)
-        self.assertEqual(len(self.b2), 9)
-        self.assertEqual(len(self.b3), 4)
+    def test_init_single_positional_metadata_item(self):
+        for item in ([2], (2, ), np.array([2])):
+            seq = Sequence('G', positional_metadata={'foo': item})
 
-    def test_repr(self):
-        self.assertEqual(repr(self.b1),
-                         "<BiologicalSequence: GATTACA (length: 7)>")
-        self.assertEqual(repr(self.b6),
-                         "<BiologicalSequence: ACGTACGTAC... (length: 12)>")
+            self.assertFalse(seq.has_metadata())
+            self.assertEqual(seq.metadata, {})
 
-    def test_reversed(self):
-        b1_reversed = reversed(self.b1)
-        for actual, expected in zip(b1_reversed, "ACATTAG"):
-            self.assertEqual(actual, expected)
+            self.assertTrue(seq.has_positional_metadata())
+            assert_data_frame_almost_equal(
+                seq.positional_metadata,
+                pd.DataFrame({'foo': item}, index=np.arange(1)))
 
-        self.assertRaises(StopIteration, lambda: next(b1_reversed))
+    def test_init_multiple_positional_metadata_item(self):
+        for item in ([0, 42, 42, 1, 0, 8, 100, 0, 0],
+                     (0, 42, 42, 1, 0, 8, 100, 0, 0),
+                     np.array([0, 42, 42, 1, 0, 8, 100, 0, 0])):
+            seq = Sequence('G' * 9, positional_metadata={'foo': item})
 
-    def test_str(self):
-        self.assertEqual(str(self.b1), "GATTACA")
-        self.assertEqual(str(self.b2), "ACCGGTACC")
-        self.assertEqual(str(self.b3), "GREG")
+            self.assertFalse(seq.has_metadata())
+            self.assertEqual(seq.metadata, {})
 
-    def test_alphabet(self):
-        self.assertEqual(self.b1.alphabet(), set())
+            self.assertTrue(seq.has_positional_metadata())
+            assert_data_frame_almost_equal(
+                seq.positional_metadata,
+                pd.DataFrame({'foo': item}, index=np.arange(9)))
 
-    def test_gap_alphabet(self):
-        self.assertEqual(self.b1.gap_alphabet(), set('-.'))
+    def test_init_positional_metadata_multiple_columns(self):
+        seq = Sequence('^' * 5,
+                       positional_metadata={'foo': np.arange(5),
+                                            'bar': np.arange(5)[::-1]})
 
-    def test_sequence(self):
-        self.assertEqual(self.b1.sequence, "GATTACA")
-        self.assertEqual(self.b2.sequence, "ACCGGTACC")
-        self.assertEqual(self.b3.sequence, "GREG")
+        self.assertFalse(seq.has_metadata())
+        self.assertEqual(seq.metadata, {})
 
-    def test_id(self):
-        self.assertEqual(self.b1.id, "")
-        self.assertEqual(self.b2.id, "test-seq-2")
-        self.assertEqual(self.b3.id, "test-seq-3")
+        self.assertTrue(seq.has_positional_metadata())
+        assert_data_frame_almost_equal(
+            seq.positional_metadata,
+            pd.DataFrame({'foo': np.arange(5),
+                          'bar': np.arange(5)[::-1]}, index=np.arange(5)))
 
-    def test_description(self):
-        self.assertEqual(self.b1.description, "")
-        self.assertEqual(self.b2.description, "A test sequence")
-        self.assertEqual(self.b3.description, "A protein sequence")
+    def test_init_positional_metadata_with_custom_index(self):
+        df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},
+                          index=['a', 'b', 'c', 'd', 'e'])
+        seq = Sequence('^' * 5, positional_metadata=df)
 
-    def test_quality(self):
-        a = BiologicalSequence('ACA', quality=(22, 22, 1))
+        self.assertFalse(seq.has_metadata())
+        self.assertEqual(seq.metadata, {})
 
-        # should get back a read-only numpy array of int dtype
-        self.assertIsInstance(a.quality, np.ndarray)
-        self.assertEqual(a.quality.dtype, np.int)
-        npt.assert_equal(a.quality, np.array((22, 22, 1)))
+        self.assertTrue(seq.has_positional_metadata())
+        assert_data_frame_almost_equal(
+            seq.positional_metadata,
+            pd.DataFrame({'foo': np.arange(5),
+                          'bar': np.arange(5)[::-1]}, index=np.arange(5)))
 
-        # test that we can't mutate the quality scores
+    def test_init_invalid_sequence(self):
+        # invalid dtype (numpy.ndarray input)
+        with self.assertRaises(TypeError):
+            # int64
+            Sequence(np.array([1, 2, 3]))
+        with self.assertRaises(TypeError):
+            # |S21
+            Sequence(np.array([1, "23", 3]))
+        with self.assertRaises(TypeError):
+            # object
+            Sequence(np.array([1, {}, ()]))
+
+        # invalid input type (non-numpy.ndarray input)
+        with six.assertRaisesRegex(self, TypeError, 'tuple'):
+            Sequence(('a', 'b', 'c'))
+        with six.assertRaisesRegex(self, TypeError, 'list'):
+            Sequence(['a', 'b', 'c'])
+        with six.assertRaisesRegex(self, TypeError, 'set'):
+            Sequence({'a', 'b', 'c'})
+        with six.assertRaisesRegex(self, TypeError, 'dict'):
+            Sequence({'a': 42, 'b': 43, 'c': 44})
+        with six.assertRaisesRegex(self, TypeError, 'int'):
+            Sequence(42)
+        with six.assertRaisesRegex(self, TypeError, 'float'):
+            Sequence(4.2)
+        with six.assertRaisesRegex(self, TypeError, 'int64'):
+            Sequence(np.int_(50))
+        with six.assertRaisesRegex(self, TypeError, 'float64'):
+            Sequence(np.float_(50))
+        with six.assertRaisesRegex(self, TypeError, 'Foo'):
+            class Foo(object):
+                pass
+            Sequence(Foo())
+
+        # out of ASCII range
+        with self.assertRaises(UnicodeEncodeError):
+            Sequence(u'abc\u1F30')
+
+    def test_init_invalid_metadata(self):
+        for md in (0, 'a', ('f', 'o', 'o'), np.array([]), pd.DataFrame()):
+            with six.assertRaisesRegex(self, TypeError,
+                                       'metadata must be a dict'):
+                Sequence('abc', metadata=md)
+
+    def test_init_invalid_positional_metadata(self):
+        # not consumable by Pandas
+        with six.assertRaisesRegex(self, TypeError,
+                                   'Positional metadata invalid. Must be '
+                                   'consumable by pd.DataFrame. '
+                                   'Original pandas error message: '):
+            Sequence('ACGT', positional_metadata=2)
+        # 0 elements
+        with six.assertRaisesRegex(self, ValueError, '\(0\).*\(4\)'):
+            Sequence('ACGT', positional_metadata=[])
+        # not enough elements
+        with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
+            Sequence('ACGT', positional_metadata=[2, 3, 4])
+        # too many elements
+        with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
+            Sequence('ACGT', positional_metadata=[2, 3, 4, 5, 6])
+        # Series not enough rows
+        with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
+            Sequence('ACGT', positional_metadata=pd.Series(range(3)))
+        # Series too many rows
+        with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
+            Sequence('ACGT', positional_metadata=pd.Series(range(5)))
+        # DataFrame not enough rows
+        with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
+            Sequence('ACGT',
+                     positional_metadata=pd.DataFrame({'quality': range(3)}))
+        # DataFrame too many rows
+        with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
+            Sequence('ACGT',
+                     positional_metadata=pd.DataFrame({'quality': range(5)}))
+
+    def test_values_property(self):
+        # Property tests are only concerned with testing the interface
+        # provided by the property: that it can be accessed, can't be
+        # reassigned or mutated in place, and that the correct type is
+        # returned. More extensive testing of border cases (e.g., different
+        # sequence lengths or input types, odd characters, etc.) is performed
+        # in Sequence.__init__ tests.
+
+        seq = Sequence('ACGT')
+
+        # should get back a numpy.ndarray of '|S1' dtype
+        self.assertIsInstance(seq.values, np.ndarray)
+        self.assertEqual(seq.values.dtype, '|S1')
+        npt.assert_equal(seq.values, np.array('ACGT', dtype='c'))
+
+        # test that we can't mutate the property
         with self.assertRaises(ValueError):
-            a.quality[1] = 42
+            seq.values[1] = 'A'
 
         # test that we can't set the property
         with self.assertRaises(AttributeError):
-            a.quality = (22, 22, 42)
+            seq.values = np.array("GGGG", dtype='c')
+
+    def test_metadata_property_getter(self):
+        md = {'foo': 'bar'}
+        seq = Sequence('', metadata=md)
+        self.assertIsInstance(seq.metadata, dict)
+        self.assertEqual(seq.metadata, md)
+        self.assertIsNot(seq.metadata, md)
+
+        # update existing key
+        seq.metadata['foo'] = 'baz'
+        self.assertEqual(seq.metadata, {'foo': 'baz'})
+
+        # add new key
+        seq.metadata['foo2'] = 'bar2'
+        self.assertEqual(seq.metadata, {'foo': 'baz', 'foo2': 'bar2'})
+
+    def test_metadata_property_getter_missing(self):
+        seq = Sequence('ACGT')
+
+        self.assertIsNone(seq._metadata)
+        self.assertEqual(seq.metadata, {})
+        self.assertIsNotNone(seq._metadata)
+
+    def test_metadata_property_setter(self):
+        md = {'foo': 'bar'}
+        seq = Sequence('', metadata=md)
+        self.assertEqual(seq.metadata, md)
+        self.assertIsNot(seq.metadata, md)
+
+        new_md = {'bar': 'baz', 42: 42}
+        seq.metadata = new_md
+        self.assertEqual(seq.metadata, new_md)
+        self.assertIsNot(seq.metadata, new_md)
+
+        seq.metadata = {}
+        self.assertEqual(seq.metadata, {})
+        self.assertFalse(seq.has_metadata())
+
+    def test_metadata_property_setter_invalid_type(self):
+        seq = Sequence('abc', metadata={123: 456})
+
+        for md in (None, 0, 'a', ('f', 'o', 'o'), np.array([]),
+                   pd.DataFrame()):
+            with six.assertRaisesRegex(self, TypeError,
+                                       'metadata must be a dict'):
+                seq.metadata = md
+
+            # object should still be usable and its original metadata shouldn't
+            # have changed
+            self.assertEqual(seq.metadata, {123: 456})
+
    def test_metadata_property_deleter(self):
        """``del seq.metadata`` resets the attribute to None; deletion is
        idempotent and safe even when no metadata was ever set."""
        md = {'foo': 'bar'}
        seq = Sequence('CAT', metadata=md)
        self.assertTrue(seq.has_metadata())
        self.assertEqual(seq.metadata, md)
        self.assertIsNot(seq.metadata, md)

        del seq.metadata
        self.assertIsNone(seq._metadata)
        self.assertFalse(seq.has_metadata())
        # getter lazily recreates an empty dict after deletion
        self.assertEqual(seq.metadata, {})

        # test deleting again
        del seq.metadata
        self.assertIsNone(seq._metadata)
        self.assertFalse(seq.has_metadata())
        self.assertEqual(seq.metadata, {})

        # test deleting missing metadata immediately after instantiation
        seq = Sequence('ACGT')
        self.assertIsNone(seq._metadata)
        del seq.metadata
        self.assertIsNone(seq._metadata)
+
    def test_metadata_property_shallow_copy(self):
        """Metadata is shallow-copied: replacing a key is isolated from the
        caller's dict, but mutating a contained mutable value is shared."""
        md = {'key1': 'val1', 'key2': 'val2', 'key3': [1, 2]}
        seq = Sequence('CAT', metadata=md)

        self.assertTrue(seq.has_metadata())
        self.assertEqual(seq.metadata, md)
        self.assertIsNot(seq.metadata, md)

        # updates to keys
        seq.metadata['key1'] = 'new val'
        self.assertEqual(seq.metadata,
                         {'key1': 'new val', 'key2': 'val2', 'key3': [1, 2]})
        # original metadata untouched
        self.assertEqual(md, {'key1': 'val1', 'key2': 'val2', 'key3': [1, 2]})

        # updates to mutable value (by reference)
        seq.metadata['key3'].append(3)
        self.assertEqual(
            seq.metadata,
            {'key1': 'new val', 'key2': 'val2', 'key3': [1, 2, 3]})
        # original metadata changed because we didn't deep copy
        self.assertEqual(
            md,
            {'key1': 'val1', 'key2': 'val2', 'key3': [1, 2, 3]})
+
    def test_positional_metadata_property_getter(self):
        """The positional-metadata getter returns a DataFrame copy that can
        be mutated in place (existing and new columns)."""
        md = pd.DataFrame({'foo': [22, 22, 0]})
        seq = Sequence('ACA', positional_metadata=md)

        assert_data_frame_almost_equal(seq.positional_metadata,
                                       pd.DataFrame({'foo': [22, 22, 0]}))
        # stored frame is not the caller's object
        self.assertIsNot(seq.positional_metadata, md)

        # update existing column
        seq.positional_metadata['foo'] = [42, 42, 43]
        assert_data_frame_almost_equal(seq.positional_metadata,
                                       pd.DataFrame({'foo': [42, 42, 43]}))

        # add new column
        seq.positional_metadata['foo2'] = [True, False, True]
        assert_data_frame_almost_equal(
            seq.positional_metadata,
            pd.DataFrame({'foo': [42, 42, 43],
                          'foo2': [True, False, True]}))
+
+    def test_positional_metadata_property_getter_missing(self):
+        seq = Sequence('ACGT')
+
+        self.assertIsNone(seq._positional_metadata)
+        assert_data_frame_almost_equal(
+            seq.positional_metadata,
+            pd.DataFrame(index=np.arange(4)))
+        self.assertIsNotNone(seq._positional_metadata)
+
    def test_positional_metadata_property_setter(self):
        """The setter copies the assigned frame and resets its index to
        0..len-1 (note 'a','b','c' below becomes 0,1,2)."""
        md = pd.DataFrame({'foo': [22, 22, 0]})
        seq = Sequence('ACA', positional_metadata=md)

        assert_data_frame_almost_equal(seq.positional_metadata,
                                       pd.DataFrame({'foo': [22, 22, 0]}))
        self.assertIsNot(seq.positional_metadata, md)

        new_md = pd.DataFrame({'bar': np.arange(3)}, index=['a', 'b', 'c'])
        seq.positional_metadata = new_md

        # index is normalized to positional integers
        assert_data_frame_almost_equal(
            seq.positional_metadata,
            pd.DataFrame({'bar': np.arange(3)}, index=np.arange(3)))
        self.assertIsNot(seq.positional_metadata, new_md)

        # a column-less frame counts as 'no positional metadata'
        seq.positional_metadata = pd.DataFrame(index=np.arange(3))
        assert_data_frame_almost_equal(seq.positional_metadata,
                                       pd.DataFrame(index=np.arange(3)))
        self.assertFalse(seq.has_positional_metadata())
+
+    def test_positional_metadata_property_setter_invalid_type(self):
+        # More extensive tests for invalid input are on Sequence.__init__ tests
+
+        seq = Sequence('abc', positional_metadata={'foo': [1, 2, 42]})
+
+        # not consumable by Pandas
+        with six.assertRaisesRegex(self, TypeError,
+                                   'Positional metadata invalid. Must be '
+                                   'consumable by pd.DataFrame. '
+                                   'Original pandas error message: '):
+            seq.positional_metadata = 2
+
+        # object should still be usable and its original metadata shouldn't
+        # have changed
+        assert_data_frame_almost_equal(seq.positional_metadata,
+                                       pd.DataFrame({'foo': [1, 2, 42]}))
+
+        # wrong length
+        with six.assertRaisesRegex(self, ValueError, '\(2\).*\(3\)'):
+            seq.positional_metadata = {'foo': [1, 2]}
+
+        assert_data_frame_almost_equal(seq.positional_metadata,
+                                       pd.DataFrame({'foo': [1, 2, 42]}))
+
+        # None isn't valid when using setter (differs from constructor)
+        with six.assertRaisesRegex(self, ValueError, '\(0\).*\(3\)'):
+            seq.positional_metadata = None
+
+        assert_data_frame_almost_equal(seq.positional_metadata,
+                                       pd.DataFrame({'foo': [1, 2, 42]}))
+
    def test_positional_metadata_property_deleter(self):
        """``del seq.positional_metadata`` resets the attribute to None;
        deletion is idempotent and safe when nothing was ever set."""
        md = pd.DataFrame({'foo': [22, 22, 0]})
        seq = Sequence('ACA', positional_metadata=md)

        self.assertTrue(seq.has_positional_metadata())
        assert_data_frame_almost_equal(seq.positional_metadata,
                                       pd.DataFrame({'foo': [22, 22, 0]}))
        self.assertIsNot(seq.positional_metadata, md)

        del seq.positional_metadata
        self.assertIsNone(seq._positional_metadata)
        self.assertFalse(seq.has_positional_metadata())
        # getter lazily recreates an empty frame over all positions
        assert_data_frame_almost_equal(seq.positional_metadata,
                                       pd.DataFrame(index=np.arange(3)))

        # test deleting again
        del seq.positional_metadata
        self.assertIsNone(seq._positional_metadata)
        self.assertFalse(seq.has_positional_metadata())
        assert_data_frame_almost_equal(seq.positional_metadata,
                                       pd.DataFrame(index=np.arange(3)))

        # test deleting missing positional metadata immediately after
        # instantiation
        seq = Sequence('ACGT')
        self.assertIsNone(seq._positional_metadata)
        del seq.positional_metadata
        self.assertIsNone(seq._positional_metadata)
+
    def test_positional_metadata_property_shallow_copy(self):
        """Positional metadata is copied deeply enough that numeric columns
        are isolated from the caller, but object-dtype cells stay shared."""
        # define metadata as a DataFrame because this has the potential to have
        # its underlying data shared
        md = pd.DataFrame({'foo': [22, 22, 0]}, index=['a', 'b', 'c'])
        seq = Sequence('ACA', positional_metadata=md)

        self.assertTrue(seq.has_positional_metadata())
        assert_data_frame_almost_equal(
            seq.positional_metadata,
            pd.DataFrame({'foo': [22, 22, 0]}, index=np.arange(3)))
        self.assertIsNot(seq.positional_metadata, md)

        # original metadata untouched
        orig_md = pd.DataFrame({'foo': [22, 22, 0]}, index=['a', 'b', 'c'])
        assert_data_frame_almost_equal(md, orig_md)

        # change values of column (using same dtype)
        seq.positional_metadata['foo'] = [42, 42, 42]
        assert_data_frame_almost_equal(
            seq.positional_metadata,
            pd.DataFrame({'foo': [42, 42, 42]}, index=np.arange(3)))

        # original metadata untouched
        assert_data_frame_almost_equal(md, orig_md)

        # change single value of underlying data
        seq.positional_metadata.values[0][0] = 10
        assert_data_frame_almost_equal(
            seq.positional_metadata,
            pd.DataFrame({'foo': [10, 42, 42]}, index=np.arange(3)))

        # original metadata untouched
        assert_data_frame_almost_equal(md, orig_md)

        # create column of object dtype -- these aren't deep copied
        md = pd.DataFrame({'obj': [[], [], []]}, index=['a', 'b', 'c'])
        seq = Sequence('ACA', positional_metadata=md)

        assert_data_frame_almost_equal(
            seq.positional_metadata,
            pd.DataFrame({'obj': [[], [], []]}, index=np.arange(3)))

        # mutate list
        seq.positional_metadata['obj'][0].append(42)
        assert_data_frame_almost_equal(
            seq.positional_metadata,
            pd.DataFrame({'obj': [[42], [], []]}, index=np.arange(3)))

        # original metadata changed because we didn't do a full deep copy
        assert_data_frame_almost_equal(
            md,
            pd.DataFrame({'obj': [[42], [], []]}, index=['a', 'b', 'c']))
+
+    def test_positional_metadata_property_set_column_series(self):
+        seq_text = 'ACGTACGT'
+        l = len(seq_text)
+        seq = Sequence(seq_text, positional_metadata={'foo': range(l)})
+        seq.positional_metadata['bar'] = pd.Series(range(l-3))
+        # pandas.Series will be padded with NaN if too short
+        npt.assert_equal(seq.positional_metadata['bar'],
+                         np.array(list(range(l-3)) + [np.NaN]*3))
+        seq.positional_metadata['baz'] = pd.Series(range(l+3))
+        # pandas.Series will be truncated if too long
+        npt.assert_equal(seq.positional_metadata['baz'],
+                         np.array(range(l)))
+
+    def test_positional_metadata_property_set_column_array(self):
+        seq_text = 'ACGTACGT'
+        l = len(seq_text)
+        seq = Sequence(seq_text, positional_metadata={'foo': range(l)})
+        # array-like objects will fail if wrong size
+        for array_like in (np.array(range(l-1)), range(l-1),
+                           np.array(range(l+1)), range(l+1)):
+            with six.assertRaisesRegex(self, ValueError,
+                                       "Length of values does not match "
+                                       "length of index"):
+                seq.positional_metadata['bar'] = array_like
 
-    def test_quality_not_provided(self):
-        b = BiologicalSequence('ACA')
-        self.assertIs(b.quality, None)
    def test_eq_and_ne(self):
        """Exercise the == and != operators directly (deliberately not via
        assertEqual/assertNotEqual, so the operators themselves are hit)."""
        seq_a = Sequence("A")
        seq_b = Sequence("B")

        # equality: identical data, metadata, and positional metadata
        self.assertTrue(seq_a == seq_a)
        self.assertTrue(Sequence("a") == Sequence("a"))
        self.assertTrue(Sequence("a", metadata={'id': 'b'}) ==
                        Sequence("a", metadata={'id': 'b'}))
        self.assertTrue(Sequence("a",
                                 metadata={'id': 'b', 'description': 'c'}) ==
                        Sequence("a",
                                 metadata={'id': 'b', 'description': 'c'}))
        self.assertTrue(Sequence("a", metadata={'id': 'b', 'description': 'c'},
                                 positional_metadata={'quality': [1]}) ==
                        Sequence("a", metadata={'id': 'b', 'description': 'c'},
                                 positional_metadata={'quality': [1]}))

        # inequality: differing type, data, or any metadata component
        self.assertTrue(seq_a != seq_b)
        self.assertTrue(SequenceSubclass("a") != Sequence("a"))
        self.assertTrue(Sequence("a") != Sequence("b"))
        self.assertTrue(Sequence("a") != Sequence("a", metadata={'id': 'b'}))
        self.assertTrue(Sequence("a", metadata={'id': 'c'}) !=
                        Sequence("a",
                                 metadata={'id': 'c', 'description': 't'}))
        self.assertTrue(Sequence("a", positional_metadata={'quality': [1]}) !=
                        Sequence("a"))
        self.assertTrue(Sequence("a", positional_metadata={'quality': [1]}) !=
                        Sequence("a", positional_metadata={'quality': [2]}))
        self.assertTrue(Sequence("c", positional_metadata={'quality': [3]}) !=
                        Sequence("b", positional_metadata={'quality': [3]}))
        self.assertTrue(Sequence("a", metadata={'id': 'b'}) !=
                        Sequence("c", metadata={'id': 'b'}))
+
+    def test_eq_sequences_without_metadata_compare_equal(self):
+        self.assertTrue(Sequence('') == Sequence(''))
+        self.assertTrue(Sequence('z') == Sequence('z'))
+        self.assertTrue(
+            Sequence('ACGT') == Sequence('ACGT'))
+
+    def test_eq_sequences_with_metadata_compare_equal(self):
+        seq1 = Sequence('ACGT', metadata={'id': 'foo', 'desc': 'abc'},
+                        positional_metadata={'qual': [1, 2, 3, 4]})
+        seq2 = Sequence('ACGT', metadata={'id': 'foo', 'desc': 'abc'},
+                        positional_metadata={'qual': [1, 2, 3, 4]})
+        self.assertTrue(seq1 == seq2)
+
+        # order shouldn't matter
+        self.assertTrue(seq2 == seq1)
+
    def test_eq_sequences_from_different_sources_compare_equal(self):
        # sequences that have the same data but are constructed from different
        # types of data should compare equal
        # (str vs. uint8 ASCII codes; tuple vs. ndarray quality values)
        seq1 = Sequence('ACGT', metadata={'id': 'foo', 'desc': 'abc'},
                        positional_metadata={'quality': (1, 2, 3, 4)})
        seq2 = Sequence(np.array([65, 67, 71, 84], dtype=np.uint8),
                        metadata={'id': 'foo', 'desc': 'abc'},
                        positional_metadata={'quality': np.array([1, 2, 3,
                                                                  4])})
        self.assertTrue(seq1 == seq2)
+
+    def test_eq_type_mismatch(self):
+        seq1 = Sequence('ACGT')
+        seq2 = SequenceSubclass('ACGT')
+        self.assertFalse(seq1 == seq2)
+
+    def test_eq_metadata_mismatch(self):
+        # both provided
+        seq1 = Sequence('ACGT', metadata={'id': 'foo'})
+        seq2 = Sequence('ACGT', metadata={'id': 'bar'})
+        self.assertFalse(seq1 == seq2)
+
+        # one provided
+        seq1 = Sequence('ACGT', metadata={'id': 'foo'})
+        seq2 = Sequence('ACGT')
+        self.assertFalse(seq1 == seq2)
+
+    def test_eq_positional_metadata_mismatch(self):
+        # both provided
+        seq1 = Sequence('ACGT', positional_metadata={'quality': [1, 2, 3, 4]})
+        seq2 = Sequence('ACGT', positional_metadata={'quality': [1, 2, 3, 5]})
+        self.assertFalse(seq1 == seq2)
+
+        # one provided
+        seq1 = Sequence('ACGT', positional_metadata={'quality': [1, 2, 3, 4]})
+        seq2 = Sequence('ACGT')
+        self.assertFalse(seq1 == seq2)
+
+    def test_eq_sequence_mismatch(self):
+        seq1 = Sequence('ACGT')
+        seq2 = Sequence('TGCA')
+        self.assertFalse(seq1 == seq2)
+
    def test_eq_handles_missing_metadata_efficiently(self):
        """Comparing two metadata-free sequences must not force lazy
        creation of their (positional) metadata attributes."""
        seq1 = Sequence('ACGT')
        seq2 = Sequence('ACGT')
        self.assertTrue(seq1 == seq2)

        # metadata attributes should be None and not initialized to a "missing"
        # representation
        self.assertIsNone(seq1._metadata)
        self.assertIsNone(seq1._positional_metadata)
        self.assertIsNone(seq2._metadata)
        self.assertIsNone(seq2._positional_metadata)
+
+    def test_getitem_gives_new_sequence(self):
+        seq = Sequence("Sequence string !1 at 2#3?.,")
+        self.assertFalse(seq is seq[:])
+
+    def test_getitem_with_int_has_positional_metadata(self):
+        s = "Sequence string !1 at 2#3?.,"
+        length = len(s)
+        seq = Sequence(s, metadata={'id': 'id', 'description': 'dsc'},
+                       positional_metadata={'quality': np.arange(length)})
+
+        eseq = Sequence("S", {'id': 'id', 'description': 'dsc'},
+                        positional_metadata={'quality': np.array([0])})
+        self.assertEqual(seq[0], eseq)
+
+        eseq = Sequence(",", metadata={'id': 'id', 'description': 'dsc'},
+                        positional_metadata={'quality':
+                                             np.array([len(seq) - 1])})
+        self.assertEqual(seq[len(seq) - 1], eseq)
+
+        eseq = Sequence("t", metadata={'id': 'id', 'description': 'dsc'},
+                        positional_metadata={'quality': [10]})
+        self.assertEqual(seq[10], eseq)
+
+    def test_single_index_to_slice(self):
+        a = [1, 2, 3, 4]
+        self.assertEqual(slice(0, 1), _single_index_to_slice(0))
+        self.assertEqual([1], a[_single_index_to_slice(0)])
+        self.assertEqual(slice(-1, None),
+                         _single_index_to_slice(-1))
+        self.assertEqual([4], a[_single_index_to_slice(-1)])
+
+    def test_is_single_index(self):
+        self.assertTrue(_is_single_index(0))
+        self.assertFalse(_is_single_index(True))
+        self.assertFalse(_is_single_index(bool()))
+        self.assertFalse(_is_single_index('a'))
+
+    def test_as_slice_if_single_index(self):
+        self.assertEqual(slice(0, 1), _as_slice_if_single_index(0))
+        slice_obj = slice(2, 3)
+        self.assertIs(slice_obj,
+                      _as_slice_if_single_index(slice_obj))
+
    def test_slice_positional_metadata(self):
        """_slice_positional_metadata accepts ints and slices; note the
        last case keeps the original index ([9]) rather than resetting it."""
        seq = Sequence('ABCDEFGHIJ',
                       positional_metadata={'foo': np.arange(10),
                                            'bar': np.arange(100, 110)})
        # int and the equivalent one-item slice give the same frame
        self.assertTrue(pd.DataFrame({'foo': [0], 'bar': [100]}).equals(
                        seq._slice_positional_metadata(0)))
        self.assertTrue(pd.DataFrame({'foo': [0], 'bar': [100]}).equals(
                        seq._slice_positional_metadata(slice(0, 1))))
        self.assertTrue(pd.DataFrame({'foo': [0, 1],
                                      'bar': [100, 101]}).equals(
                        seq._slice_positional_metadata(slice(0, 2))))
        self.assertTrue(pd.DataFrame(
            {'foo': [9], 'bar': [109]}, index=[9]).equals(
                seq._slice_positional_metadata(9)))
+
    def test_getitem_with_int_no_positional_metadata(self):
        """Integer indexing without positional metadata keeps the metadata
        dict but attaches no positional metadata."""
        # NOTE(review): " at " inside the literal looks like mailing-list
        # mangling of "@" -- confirm against upstream.
        seq = Sequence("Sequence string !1 at 2#3?.,",
                       metadata={'id': 'id2', 'description': 'no_qual'})

        eseq = Sequence("t", metadata={'id': 'id2', 'description': 'no_qual'})
        self.assertEqual(seq[10], eseq)
+
    def test_getitem_with_slice_has_positional_metadata(self):
        """Slicing with starts/stops/steps (including negative and empty
        slices) carries metadata and slices quality in lockstep."""
        s = "0123456789abcdef"
        length = len(s)
        seq = Sequence(s, metadata={'id': 'id3', 'description': 'dsc3'},
                       positional_metadata={'quality': np.arange(length)})

        # simple prefix, in three equivalent spellings
        eseq = Sequence("012", metadata={'id': 'id3', 'description': 'dsc3'},
                        positional_metadata={'quality': np.arange(3)})
        self.assertEqual(seq[0:3], eseq)
        self.assertEqual(seq[:3], eseq)
        self.assertEqual(seq[:3:1], eseq)

        # suffix via negative start
        eseq = Sequence("def", metadata={'id': 'id3', 'description': 'dsc3'},
                        positional_metadata={'quality': [13, 14, 15]})
        self.assertEqual(seq[-3:], eseq)
        self.assertEqual(seq[-3::1], eseq)

        # positive stride
        eseq = Sequence("02468ace",
                        metadata={'id': 'id3', 'description': 'dsc3'},
                        positional_metadata={'quality': [0, 2, 4, 6, 8, 10,
                                                         12, 14]})
        self.assertEqual(seq[0:length:2], eseq)
        self.assertEqual(seq[::2], eseq)

        # full reversal
        eseq = Sequence(s[::-1], metadata={'id': 'id3', 'description': 'dsc3'},
                        positional_metadata={'quality':
                                             np.arange(length)[::-1]})
        self.assertEqual(seq[length::-1], eseq)
        self.assertEqual(seq[::-1], eseq)

        # reversed with stride
        eseq = Sequence('fdb97531',
                        metadata={'id': 'id3', 'description': 'dsc3'},
                        positional_metadata={'quality': [15, 13, 11, 9, 7, 5,
                                                         3, 1]})
        self.assertEqual(seq[length::-2], eseq)
        self.assertEqual(seq[::-2], eseq)

        # stop past the end is clamped, like built-in sequences
        self.assertEqual(seq[0:500:], seq)

        # empty slices still carry metadata and an empty quality column
        eseq = Sequence('', metadata={'id': 'id3', 'description': 'dsc3'},
                        positional_metadata={'quality':
                                             np.array([], dtype=np.int64)})
        self.assertEqual(seq[length:0], eseq)
        self.assertEqual(seq[-length:0], eseq)
        self.assertEqual(seq[1:0], eseq)

        # single-element slices, including a reversed spelling
        eseq = Sequence("0", metadata={'id': 'id3', 'description': 'dsc3'},
                        positional_metadata={'quality': [0]})
        self.assertEqual(seq[0:1], eseq)
        self.assertEqual(seq[0:1:1], eseq)
        self.assertEqual(seq[-length::-1], eseq)
+
+    def test_getitem_with_slice_no_positional_metadata(self):
+        s = "0123456789abcdef"
+        length = len(s)
+        seq = Sequence(s, metadata={'id': 'id4', 'description': 'no_qual4'})
+
+        eseq = Sequence("02468ace",
+                        metadata={'id': 'id4', 'description': 'no_qual4'})
+        self.assertEqual(seq[0:length:2], eseq)
+        self.assertEqual(seq[::2], eseq)
+
    def test_getitem_with_tuple_of_mixed_with_positional_metadata(self):
        """Tuple indexing concatenates the pieces selected by each int or
        slice; empty slices (e.g. 1:0) contribute nothing."""
        s = "0123456789abcdef"
        length = len(s)
        seq = Sequence(s, metadata={'id': 'id5', 'description': 'dsc5'},
                       positional_metadata={'quality': np.arange(length)})

        # five ways to spell "position 0 five times"
        eseq = Sequence("00000", metadata={'id': 'id5', 'description': 'dsc5'},
                        positional_metadata={'quality': [0, 0, 0, 0, 0]})
        self.assertEqual(seq[0, 0, 0, 0, 0], eseq)
        self.assertEqual(seq[0, 0:1, 0, 0, 0], eseq)
        self.assertEqual(seq[0, 0:1, 0, -length::-1, 0, 1:0], eseq)
        self.assertEqual(seq[0:1, 0:1, 0:1, 0:1, 0:1], eseq)
        self.assertEqual(seq[0:1, 0, 0, 0, 0], eseq)

        # mixed ints and (reversed) slices; quality follows the selection
        eseq = Sequence("0123fed9",
                        metadata={'id': 'id5', 'description': 'dsc5'},
                        positional_metadata={'quality': [0, 1, 2, 3, 15, 14,
                                                         13, 9]})
        self.assertEqual(seq[0, 1, 2, 3, 15, 14, 13, 9], eseq)
        self.assertEqual(seq[0, 1, 2, 3, :-4:-1, 9], eseq)
        self.assertEqual(seq[0:4, :-4:-1, 9, 1:0], eseq)
        self.assertEqual(seq[0:4, :-4:-1, 9:10], eseq)
+
+    def test_getitem_with_tuple_of_mixed_no_positional_metadata(self):
+        seq = Sequence("0123456789abcdef",
+                       metadata={'id': 'id6', 'description': 'no_qual6'})
+        eseq = Sequence("0123fed9",
+                        metadata={'id': 'id6', 'description': 'no_qual6'})
+        self.assertEqual(seq[0, 1, 2, 3, 15, 14, 13, 9], eseq)
+        self.assertEqual(seq[0, 1, 2, 3, :-4:-1, 9], eseq)
+        self.assertEqual(seq[0:4, :-4:-1, 9], eseq)
+        self.assertEqual(seq[0:4, :-4:-1, 9:10], eseq)
+
    def test_getitem_with_iterable_of_mixed_has_positional_metadata(self):
        """Lists and generators of ints/slices index like tuples; slices
        beyond the end (200:400) contribute nothing."""
        s = "0123456789abcdef"
        length = len(s)
        seq = Sequence(s, metadata={'id': 'id7', 'description': 'dsc7'},
                       positional_metadata={'quality': np.arange(length)})

        def generator():
            # mixes a slice, an out-of-range slice, negative ints, a
            # reversed slice, and a plain int
            yield slice(0, 4)
            yield slice(200, 400)
            yield -1
            yield slice(-2, -4, -1)
            yield 9

        eseq = Sequence("0123fed9",
                        metadata={'id': 'id7', 'description': 'dsc7'},
                        positional_metadata={'quality': [0, 1, 2, 3, 15, 14,
                                                         13, 9]})
        self.assertEqual(seq[[0, 1, 2, 3, 15, 14, 13, 9]], eseq)
        self.assertEqual(seq[generator()], eseq)
        self.assertEqual(seq[[slice(0, 4), slice(None, -4, -1), 9]], eseq)
        self.assertEqual(seq[
            [slice(0, 4), slice(None, -4, -1), slice(9, 10)]], eseq)
+
    def test_getitem_with_iterable_of_mixed_no_positional_metadata(self):
        """Same iterable-indexing behavior when no positional metadata is
        attached."""
        s = "0123456789abcdef"
        seq = Sequence(s, metadata={'id': 'id7', 'description': 'dsc7'})

        def generator():
            yield slice(0, 4)
            yield slice(200, 400)
            yield slice(None, -4, -1)
            yield 9

        eseq = Sequence("0123fed9",
                        metadata={'id': 'id7', 'description': 'dsc7'})
        self.assertEqual(seq[[0, 1, 2, 3, 15, 14, 13, 9]], eseq)
        self.assertEqual(seq[generator()], eseq)
        self.assertEqual(seq[[slice(0, 4), slice(None, -4, -1), 9]], eseq)
        self.assertEqual(seq[
            [slice(0, 4), slice(None, -4, -1), slice(9, 10)]], eseq)
+
+    def test_getitem_with_numpy_index_has_positional_metadata(self):
+        s = "0123456789abcdef"
+        length = len(s)
+        seq = Sequence(s, metadata={'id': 'id9', 'description': 'dsc9'},
+                       positional_metadata={'quality': np.arange(length)})
+
+        eseq = Sequence("0123fed9",
+                        metadata={'id': 'id9', 'description': 'dsc9'},
+                        positional_metadata={'quality': [0, 1, 2, 3, 15, 14,
+                                                         13, 9]})
+        self.assertEqual(seq[np.array([0, 1, 2, 3, 15, 14, 13, 9])], eseq)
+
+    def test_getitem_with_numpy_index_no_positional_metadata(self):
+        s = "0123456789abcdef"
+        seq = Sequence(s, metadata={'id': 'id10', 'description': 'dsc10'})
+
+        eseq = Sequence("0123fed9",
+                        metadata={'id': 'id10', 'description': 'dsc10'})
+        self.assertEqual(seq[np.array([0, 1, 2, 3, 15, 14, 13, 9])], eseq)
+
+    def test_getitem_with_empty_indices_empty_seq_no_pos_metadata(self):
+        s = ""
+        seq = Sequence(s, metadata={'id': 'id10', 'description': 'dsc10'})
+
+        eseq = Sequence('', metadata={'id': 'id10', 'description': 'dsc10'})
+
+        tested = 0
+        for index in self.getitem_empty_indices:
+            tested += 1
+            self.assertEqual(seq[index], eseq)
+        self.assertEqual(tested, 6)
+
+    def test_getitem_with_empty_indices_non_empty_seq_no_pos_metadata(self):
+        s = "0123456789abcdef"
+        seq = Sequence(s, metadata={'id': 'id10', 'description': 'dsc10'})
+
+        eseq = Sequence('', metadata={'id': 'id10', 'description': 'dsc10'})
+
+        tested = 0
+        for index in self.getitem_empty_indices:
+            tested += 1
+            self.assertEqual(seq[index], eseq)
+        self.assertEqual(tested, 6)
+
    def test_getitem_with_boolean_vector_has_qual(self):
        """Boolean-mask indexing (ndarray or plain list) selects the True
        positions and their quality rows."""
        s = "0123456789abcdef"
        length = len(s)
        seq = Sequence(s, metadata={'id': 'id11', 'description': 'dsc11'},
                       positional_metadata={'quality': np.arange(length)})

        # mask keeps every odd position
        eseq = Sequence("13579bdf",
                        metadata={'id': 'id11', 'description': 'dsc11'},
                        positional_metadata={'quality': [1, 3, 5, 7, 9, 11,
                                                         13, 15]})

        self.assertEqual(seq[np.array([False, True] * 8)], eseq)
        self.assertEqual(seq[[False, True] * 8], eseq)
+
+    def test_getitem_with_boolean_vector_no_positional_metadata(self):
+        s = "0123456789abcdef"
+        seq = Sequence(s, metadata={'id': 'id11', 'description': 'dsc11'})
+
+        eseq = Sequence("13579bdf",
+                        metadata={'id': 'id11', 'description': 'dsc11'})
+
+        self.assertEqual(seq[np.array([False, True] * 8)], eseq)
+
+    def test_getitem_with_invalid(self):
+        seq = Sequence("123456",
+                       metadata={'id': 'idm', 'description': 'description'},
+                       positional_metadata={'quality': [1, 2, 3, 4, 5, 6]})
 
-    def test_quality_scalar(self):
-        b = BiologicalSequence('G', quality=2)
+        with self.assertRaises(IndexError):
+            seq['not an index']
 
-        self.assertIsInstance(b.quality, np.ndarray)
-        self.assertEqual(b.quality.dtype, np.int)
-        self.assertEqual(b.quality.shape, (1,))
-        npt.assert_equal(b.quality, np.array([2]))
+        with self.assertRaises(IndexError):
+            seq[['1', '2']]
+
+        with self.assertRaises(IndexError):
+            seq[[1, slice(1, 2), 'a']]
 
-    def test_quality_empty(self):
-        b = BiologicalSequence('', quality=[])
+        with self.assertRaises(IndexError):
+            seq[[1, slice(1, 2), True]]
 
-        self.assertIsInstance(b.quality, np.ndarray)
-        self.assertEqual(b.quality.dtype, np.int)
-        self.assertEqual(b.quality.shape, (0,))
-        npt.assert_equal(b.quality, np.array([]))
+        with self.assertRaises(IndexError):
+            seq[True]
 
-    def test_quality_no_copy(self):
-        qual = np.array([22, 22, 1])
-        a = BiologicalSequence('ACA', quality=qual)
-        self.assertIs(a.quality, qual)
+        with self.assertRaises(IndexError):
+            seq[np.array([True, False])]
 
-        with self.assertRaises(ValueError):
-            a.quality[1] = 42
+        with self.assertRaises(IndexError):
+            seq[999]
 
-        with self.assertRaises(ValueError):
-            qual[1] = 42
+        with self.assertRaises(IndexError):
+            seq[0, 0, 999]
+
+        # numpy 1.8.1 and 1.9.2 raise different error types
+        # (ValueError, IndexError).
+        with self.assertRaises(Exception):
+            seq[100 * [True, False, True]]
+
    def test_getitem_handles_missing_metadata_efficiently(self):
        # there are two paths in __getitem__ we need to test for efficient
        # handling of missing metadata

        # path 1: mixed types
        seq = Sequence('ACGT')
        subseq = seq[1, 2:4]
        self.assertEqual(subseq, Sequence('CGT'))

        # metadata attributes should be None and not initialized to a "missing"
        # representation
        self.assertIsNone(seq._metadata)
        self.assertIsNone(seq._positional_metadata)
        self.assertIsNone(subseq._metadata)
        self.assertIsNone(subseq._positional_metadata)

        # path 2: uniform types
        seq = Sequence('ACGT')
        subseq = seq[1:3]
        self.assertEqual(subseq, Sequence('CG'))

        self.assertIsNone(seq._metadata)
        self.assertIsNone(seq._positional_metadata)
        self.assertIsNone(subseq._metadata)
        self.assertIsNone(subseq._positional_metadata)
 
-    def test_has_quality(self):
-        a = BiologicalSequence('ACA', quality=(5, 4, 67))
-        self.assertTrue(a.has_quality())
+    def test_len(self):
+        self.assertEqual(len(Sequence("")), 0)
+        self.assertEqual(len(Sequence("a")), 1)
+        self.assertEqual(len(Sequence("abcdef")), 6)
+
    def test_nonzero(self):
        """Truth value follows sequence length, regardless of metadata."""
        # blank
        self.assertFalse(Sequence(""))
        self.assertFalse(Sequence("",
                                  metadata={'id': 'foo'},
                                  positional_metadata={'quality': range(0)}))
        # single
        self.assertTrue(Sequence("A"))
        self.assertTrue(Sequence("A",
                                 metadata={'id': 'foo'},
                                 positional_metadata={'quality': range(1)}))
        # multi
        self.assertTrue(Sequence("ACGT"))
        self.assertTrue(Sequence("ACGT",
                                 metadata={'id': 'foo'},
                                 positional_metadata={'quality': range(4)}))
 
-        b = BiologicalSequence('ACA')
-        self.assertFalse(b.has_quality())
+    def test_contains(self):
+        seq = Sequence("#@ACGT,24.13**02")
+        tested = 0
+        for c in self.sequence_kinds:
+            tested += 1
+            self.assertTrue(c(',24') in seq)
+            self.assertTrue(c('*') in seq)
+            self.assertTrue(c('') in seq)
 
-    def test_copy_default_behavior(self):
-        # minimal sequence, sequence with all optional attributes present, and
-        # a subclass of BiologicalSequence
-        for seq in self.b6, self.b8, RNASequence('ACGU', id='rna seq'):
-            copy = seq.copy()
-            self.assertTrue(seq.equals(copy))
-            self.assertFalse(seq is copy)
-
-    def test_copy_update_single_attribute(self):
-        copy = self.b8.copy(id='new id')
-        self.assertFalse(self.b8 is copy)
-
-        # they don't compare equal when we compare all attributes...
-        self.assertFalse(self.b8.equals(copy))
-
-        # ...but they *do* compare equal when we ignore id, as that was the
-        # only attribute that changed
-        self.assertTrue(self.b8.equals(copy, ignore=['id']))
-
-        # id should be what we specified in the copy call...
-        self.assertEqual(copy.id, 'new id')
-
-        # ..and shouldn't have changed on the original sequence
-        self.assertEqual(self.b8.id, 'hello')
-
-    def test_copy_update_multiple_attributes(self):
-        copy = self.b8.copy(id='new id', quality=range(20, 25),
-                            sequence='ACGTA', description='new desc')
-        self.assertFalse(self.b8 is copy)
-        self.assertFalse(self.b8.equals(copy))
-
-        # attributes should be what we specified in the copy call...
-        self.assertEqual(copy.id, 'new id')
-        npt.assert_equal(copy.quality, np.array([20, 21, 22, 23, 24]))
-        self.assertEqual(copy.sequence, 'ACGTA')
-        self.assertEqual(copy.description, 'new desc')
-
-        # ..and shouldn't have changed on the original sequence
-        self.assertEqual(self.b8.id, 'hello')
-        npt.assert_equal(self.b8.quality, range(11))
-        self.assertEqual(self.b8.sequence, 'HE..--..LLO')
-        self.assertEqual(self.b8.description, 'gapped hello')
-
-    def test_copy_invalid_kwargs(self):
+            self.assertFalse(c("$") in seq)
+            self.assertFalse(c("AGT") in seq)
+
+        self.assertEqual(tested, 4)
+
+    def test_contains_sequence_subclass(self):
         with self.assertRaises(TypeError):
-            self.b2.copy(id='bar', unrecognized_kwarg='baz')
+            SequenceSubclass("A") in Sequence("AAA")
 
-    def test_copy_extra_non_attribute_kwargs(self):
-        # test that we can pass through additional kwargs to the constructor
-        # that aren't related to biological sequence attributes (i.e., they
-        # aren't state that has to be copied)
+        self.assertTrue(SequenceSubclass("A").values in Sequence("AAA"))
 
-        # create an invalid DNA sequence
-        a = DNASequence('FOO', description='foo')
+    def test_hash(self):
+        with self.assertRaises(TypeError):
+            hash(Sequence("ABCDEFG"))
+        self.assertNotIsInstance(Sequence("ABCDEFG"), Hashable)
+
+    def test_iter_has_positional_metadata(self):
+        tested = False
+        seq = Sequence("0123456789", metadata={'id': 'a', 'desc': 'b'},
+                       positional_metadata={'qual': np.arange(10)})
+        for i, s in enumerate(seq):
+            tested = True
+            self.assertEqual(s, Sequence(str(i),
+                                         metadata={'id': 'a', 'desc': 'b'},
+                                         positional_metadata={'qual': [i]}))
+        self.assertTrue(tested)
+
+    def test_iter_no_positional_metadata(self):
+        tested = False
+        seq = Sequence("0123456789", metadata={'id': 'a', 'desc': 'b'})
+        for i, s in enumerate(seq):
+            tested = True
+            self.assertEqual(s, Sequence(str(i),
+                                         metadata={'id': 'a', 'desc': 'b'}))
+        self.assertTrue(tested)
+
+    def test_reversed_has_positional_metadata(self):
+        tested = False
+        seq = Sequence("0123456789", metadata={'id': 'a', 'desc': 'b'},
+                       positional_metadata={'qual': np.arange(10)})
+        for i, s in enumerate(reversed(seq)):
+            tested = True
+            self.assertEqual(s, Sequence(str(9 - i),
+                                         metadata={'id': 'a', 'desc': 'b'},
+                                         positional_metadata={'qual':
+                                                              [9 - i]}))
+        self.assertTrue(tested)
+
+    def test_reversed_no_positional_metadata(self):
+        tested = False
+        seq = Sequence("0123456789", metadata={'id': 'a', 'desc': 'b'})
+        for i, s in enumerate(reversed(seq)):
+            tested = True
+            self.assertEqual(s, Sequence(str(9 - i),
+                                         metadata={'id': 'a', 'desc': 'b'}))
+        self.assertTrue(tested)
 
-        # should be able to copy it b/c validate defaults to False
-        b = a.copy()
-        self.assertTrue(a.equals(b))
-        self.assertFalse(a is b)
+    def test_repr(self):
+        # basic sanity checks -- more extensive testing of formatting and
+        # special cases is performed in SequenceReprDoctests below. here we
+        # only test that pieces of the repr are present. these tests also
+        # exercise coverage for py2/3 since the doctests in
+        # SequenceReprDoctests only currently run in py2.
+
+        # minimal
+        obs = repr(Sequence(''))
+        self.assertEqual(obs.count('\n'), 4)
+        self.assertTrue(obs.startswith('Sequence'))
+        self.assertIn('length: 0', obs)
+        self.assertTrue(obs.endswith('-'))
+
+        # no metadata
+        obs = repr(Sequence('ACGT'))
+        self.assertEqual(obs.count('\n'), 5)
+        self.assertTrue(obs.startswith('Sequence'))
+        self.assertIn('length: 4', obs)
+        self.assertTrue(obs.endswith('0 ACGT'))
+
+        # metadata and positional metadata of mixed types
+        obs = repr(
+            Sequence(
+                'ACGT',
+                metadata={'foo': 'bar', u'bar': 33.33, None: True, False: {},
+                          (1, 2): 3, 'acb' * 100: "'", 10: 11},
+                positional_metadata={'foo': range(4),
+                                     42: ['a', 'b', [], 'c']}))
+        self.assertEqual(obs.count('\n'), 16)
+        self.assertTrue(obs.startswith('Sequence'))
+        self.assertIn('None: True', obs)
+        self.assertIn('\'foo\': \'bar\'', obs)
+        self.assertIn('42: <dtype: object>', obs)
+        self.assertIn('\'foo\': <dtype: int64>', obs)
+        self.assertIn('length: 4', obs)
+        self.assertTrue(obs.endswith('0 ACGT'))
+
+        # sequence spanning > 5 lines
+        obs = repr(Sequence('A' * 301))
+        self.assertEqual(obs.count('\n'), 9)
+        self.assertTrue(obs.startswith('Sequence'))
+        self.assertIn('length: 301', obs)
+        self.assertIn('...', obs)
+        self.assertTrue(obs.endswith('300 A'))
 
-        # specifying validate should raise an error when the copy is
-        # instantiated
-        with self.assertRaises(BiologicalSequenceError):
-            a.copy(validate=True)
+    def test_str(self):
+        self.assertEqual(str(Sequence("GATTACA")), "GATTACA")
+        self.assertEqual(str(Sequence("ACCGGTACC")), "ACCGGTACC")
+        self.assertEqual(str(Sequence("GREG")), "GREG")
+        self.assertEqual(
+            str(Sequence("ABC",
+                         positional_metadata={'quality': [1, 2, 3]})),
+            "ABC")
+        self.assertIs(type(str(Sequence("A"))), str)
 
-    def test_equals_true(self):
-        # sequences match, all other attributes are not provided
-        self.assertTrue(
-            BiologicalSequence('ACGT').equals(BiologicalSequence('ACGT')))
-
-        # all attributes are provided and match
-        a = BiologicalSequence('ACGT', id='foo', description='abc',
-                               quality=[1, 2, 3, 4])
-        b = BiologicalSequence('ACGT', id='foo', description='abc',
-                               quality=[1, 2, 3, 4])
-        self.assertTrue(a.equals(b))
-
-        # ignore type
-        a = BiologicalSequence('ACGT')
-        b = DNASequence('ACGT')
-        self.assertTrue(a.equals(b, ignore=['type']))
-
-        # ignore id
-        a = BiologicalSequence('ACGT', id='foo')
-        b = BiologicalSequence('ACGT', id='bar')
-        self.assertTrue(a.equals(b, ignore=['id']))
-
-        # ignore description
-        a = BiologicalSequence('ACGT', description='foo')
-        b = BiologicalSequence('ACGT', description='bar')
-        self.assertTrue(a.equals(b, ignore=['description']))
-
-        # ignore quality
-        a = BiologicalSequence('ACGT', quality=[1, 2, 3, 4])
-        b = BiologicalSequence('ACGT', quality=[5, 6, 7, 8])
-        self.assertTrue(a.equals(b, ignore=['quality']))
-
-        # ignore sequence
-        a = BiologicalSequence('ACGA')
-        b = BiologicalSequence('ACGT')
-        self.assertTrue(a.equals(b, ignore=['sequence']))
-
-        # ignore everything
-        a = BiologicalSequence('ACGA', id='foo', description='abc',
-                               quality=[1, 2, 3, 4])
-        b = DNASequence('ACGT', id='bar', description='def',
-                        quality=[5, 6, 7, 8])
-        self.assertTrue(a.equals(b, ignore=['quality', 'description', 'id',
-                                            'sequence', 'type']))
-
-    def test_equals_false(self):
-        # type mismatch
-        a = BiologicalSequence('ACGT', id='foo', description='abc',
-                               quality=[1, 2, 3, 4])
-        b = NucleotideSequence('ACGT', id='bar', description='def',
-                               quality=[5, 6, 7, 8])
-        self.assertFalse(a.equals(b, ignore=['quality', 'description', 'id']))
-
-        # id mismatch
-        a = BiologicalSequence('ACGT', id='foo')
-        b = BiologicalSequence('ACGT', id='bar')
-        self.assertFalse(a.equals(b))
-
-        # description mismatch
-        a = BiologicalSequence('ACGT', description='foo')
-        b = BiologicalSequence('ACGT', description='bar')
-        self.assertFalse(a.equals(b))
-
-        # quality mismatch (both provided)
-        a = BiologicalSequence('ACGT', quality=[1, 2, 3, 4])
-        b = BiologicalSequence('ACGT', quality=[1, 2, 3, 5])
-        self.assertFalse(a.equals(b))
-
-        # quality mismatch (one provided)
-        a = BiologicalSequence('ACGT', quality=[1, 2, 3, 4])
-        b = BiologicalSequence('ACGT')
-        self.assertFalse(a.equals(b))
-
-        # sequence mismatch
-        a = BiologicalSequence('ACGT')
-        b = BiologicalSequence('TGCA')
-        self.assertFalse(a.equals(b))
+    def test_to_default_behavior(self):
+        # minimal sequence, sequence with all optional attributes present, and
+        # a subclass of Sequence
+        for seq in (Sequence('ACGT'),
+                    Sequence('ACGT', metadata={'id': 'foo', 'desc': 'bar'},
+                             positional_metadata={'quality': range(4)}),
+                    SequenceSubclass('ACGU', metadata={'id': 'rna seq'})):
+            to = seq._to()
+            self.assertEqual(seq, to)
+            self.assertIsNot(seq, to)
+
+    def test_to_update_single_attribute(self):
+        seq = Sequence('HE..--..LLO',
+                       metadata={'id': 'hello', 'description': 'gapped hello'},
+                       positional_metadata={'quality': range(11)})
+
+        to = seq._to(metadata={'id': 'new id'})
+        self.assertIsNot(seq, to)
+        self.assertNotEqual(seq, to)
+        self.assertEqual(
+            to,
+            Sequence('HE..--..LLO', metadata={'id': 'new id'},
+                     positional_metadata={'quality': range(11)}))
+
+        # metadata shouldn't have changed on the original sequence
+        self.assertEqual(seq.metadata,
+                         {'id': 'hello', 'description': 'gapped hello'})
+
+    def test_to_update_multiple_attributes(self):
+        seq = Sequence('HE..--..LLO',
+                       metadata={'id': 'hello', 'description': 'gapped hello'},
+                       positional_metadata={'quality': range(11)})
+
+        to = seq._to(metadata={'id': 'new id', 'description': 'new desc'},
+                     positional_metadata={'quality': range(20, 25)},
+                     sequence='ACGTA')
+        self.assertIsNot(seq, to)
+        self.assertNotEqual(seq, to)
+
+        # attributes should be what we specified in the _to call...
+        self.assertEqual(to.metadata['id'], 'new id')
+        npt.assert_array_equal(to.positional_metadata['quality'],
+                               np.array([20, 21, 22, 23, 24]))
+        npt.assert_array_equal(to.values, np.array('ACGTA', dtype='c'))
+        self.assertEqual(to.metadata['description'], 'new desc')
+
+        # ...and shouldn't have changed on the original sequence
+        self.assertEqual(seq.metadata['id'], 'hello')
+        npt.assert_array_equal(seq.positional_metadata['quality'], range(11))
+        npt.assert_array_equal(seq.values, np.array('HE..--..LLO',
+                                                    dtype='c'))
+        self.assertEqual(seq.metadata['description'], 'gapped hello')
+
+    def test_to_invalid_kwargs(self):
+        seq = Sequence('ACCGGTACC', metadata={'id': "test-seq",
+                       'desc': "A test sequence"})
+
+        with self.assertRaises(TypeError):
+            seq._to(metadata={'id': 'bar'}, unrecognized_kwarg='baz')
 
     def test_count(self):
-        self.assertEqual(self.b1.count('A'), 3)
-        self.assertEqual(self.b1.count('T'), 2)
-        self.assertEqual(self.b1.count('TT'), 1)
+        def construct_char_array(s):
+            return np.fromstring(s, dtype='|S1')
 
-    def test_degap(self):
-        # use equals method to ensure that id, description, and filtered
-        # quality are correctly propagated to the resulting sequence
+        def construct_uint8_array(s):
+            return np.fromstring(s, dtype=np.uint8)
 
-        # no filtering, has quality
-        self.assertTrue(self.b1.degap().equals(self.b1))
+        seq = Sequence("1234567899876555")
+        tested = 0
+        for c in self.sequence_kinds:
+            tested += 1
+            self.assertEqual(seq.count(c('4')), 1)
+            self.assertEqual(seq.count(c('8')), 2)
+            self.assertEqual(seq.count(c('5')), 4)
+            self.assertEqual(seq.count(c('555')), 1)
+            self.assertEqual(seq.count(c('555'), 0, 4), 0)
+            self.assertEqual(seq.count(c('555'), start=0, end=4), 0)
+            self.assertEqual(seq.count(c('5'), start=10), 3)
+            self.assertEqual(seq.count(c('5'), end=10), 1)
 
-        # no filtering, doesn't have quality
-        self.assertTrue(self.b2.degap().equals(self.b2))
+            with self.assertRaises(ValueError):
+                seq.count(c(''))
 
-        # everything is filtered, has quality
-        self.assertTrue(self.b7.degap().equals(
-            BiologicalSequence('', quality=[])))
+        self.assertEqual(tested, 4)
 
-        # some filtering, has quality
-        self.assertTrue(self.b8.degap().equals(
-            BiologicalSequence('HELLO', id='hello', description='gapped hello',
-                               quality=[0, 1, 8, 9, 10])))
+    def test_count_on_subclass(self):
+        with self.assertRaises(TypeError) as cm:
+            Sequence("abcd").count(SequenceSubclass("a"))
+
+        self.assertIn("Sequence", str(cm.exception))
+        self.assertIn("SequenceSubclass", str(cm.exception))
 
     def test_distance(self):
-        # note that test_hamming_distance covers default behavior more
-        # extensively
-        self.assertEqual(self.b1.distance(self.b1), 0.0)
-        self.assertEqual(self.b1.distance(BiologicalSequence('GATTACC')), 1./7)
+        tested = 0
+        for constructor in self.sequence_kinds:
+            tested += 1
+            seq1 = Sequence("abcdef")
+            seq2 = constructor("12bcef")
 
-        def dumb_distance(x, y):
-            return 42
+            self.assertIsInstance(seq1.distance(seq1), float)
+            self.assertEqual(seq1.distance(seq2), 2.0/3.0)
 
-        self.assertEqual(
-            self.b1.distance(self.b1, distance_fn=dumb_distance), 42)
-
-    def test_distance_unequal_length(self):
-        # Hamming distance (default) requires that sequences are of equal
-        # length
-        with self.assertRaises(BiologicalSequenceError):
-            self.b1.distance(self.b2)
-
-        # alternate distance functions don't have that requirement (unless
-        # it's implemented within the provided distance function)
-        def dumb_distance(x, y):
-            return 42
-        self.assertEqual(
-            self.b1.distance(self.b2, distance_fn=dumb_distance), 42)
+        self.assertEqual(tested, 4)
 
-    def test_fraction_diff(self):
-        self.assertEqual(self.b1.fraction_diff(self.b1), 0., 5)
-        self.assertEqual(
-            self.b1.fraction_diff(BiologicalSequence('GATTACC')), 1. / 7., 5)
-
-    def test_fraction_same(self):
-        self.assertAlmostEqual(self.b1.fraction_same(self.b1), 1., 5)
-        self.assertAlmostEqual(
-            self.b1.fraction_same(BiologicalSequence('GATTACC')), 6. / 7., 5)
-
-    def test_gap_maps(self):
-        # in sequence with no gaps, the gap_maps are identical
-        self.assertEqual(self.b1.gap_maps(),
-                         ([0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]))
-        # in sequence with all gaps, the map of degapped to gapped is the empty
-        # list (bc its length is 0), and the map of gapped to degapped is all
-        # None
-        self.assertEqual(self.b7.gap_maps(),
-                         ([], [None, None, None, None, None, None]))
-
-        self.assertEqual(self.b8.gap_maps(),
-                         ([0, 1, 8, 9, 10],
-                          [0, 1, None, None, None, None, None, None, 2, 3, 4]))
-
-        # example from the gap_maps doc string
-        self.assertEqual(BiologicalSequence('-ACCGA-TA-').gap_maps(),
-                         ([1, 2, 3, 4, 5, 7, 8],
-                          [None, 0, 1, 2, 3, 4, None, 5, 6, None]))
-
-    def test_gap_vector(self):
-        self.assertEqual(self.b1.gap_vector(),
-                         [False] * len(self.b1))
-        self.assertEqual(self.b7.gap_vector(),
-                         [True] * len(self.b7))
-        self.assertEqual(self.b8.gap_vector(),
-                         [False, False, True, True, True, True,
-                          True, True, False, False, False])
-
-    def test_unsupported_characters(self):
-        self.assertEqual(self.b1.unsupported_characters(), set('GATC'))
-        self.assertEqual(self.b7.unsupported_characters(), set())
-
-    def test_has_unsupported_characters(self):
-        self.assertTrue(self.b1.has_unsupported_characters())
-        self.assertFalse(self.b7.has_unsupported_characters())
+    def test_distance_arbitrary_function(self):
+        def metric(x, y):
+            return len(x) ** 2 + len(y) ** 2
 
-    def test_index(self):
-        self.assertEqual(self.b1.index('G'), 0)
-        self.assertEqual(self.b1.index('A'), 1)
-        self.assertEqual(self.b1.index('AC'), 4)
-        self.assertRaises(ValueError, self.b1.index, 'x')
-
-    def test_is_gap(self):
-        self.assertTrue(self.b1.is_gap('.'))
-        self.assertTrue(self.b1.is_gap('-'))
-        self.assertFalse(self.b1.is_gap('A'))
-        self.assertFalse(self.b1.is_gap('x'))
-        self.assertFalse(self.b1.is_gap(' '))
-        self.assertFalse(self.b1.is_gap(''))
-
-    def test_is_gapped(self):
-        self.assertFalse(self.b1.is_gapped())
-        self.assertFalse(self.b2.is_gapped())
-        self.assertTrue(self.b7.is_gapped())
-        self.assertTrue(self.b8.is_gapped())
-
-    def test_is_valid(self):
-        self.assertFalse(self.b1.is_valid())
-        self.assertTrue(self.b7.is_valid())
-
-    def test_to_fasta(self):
-        self.assertEqual(self.b1.to_fasta(), ">\nGATTACA\n")
-        self.assertEqual(self.b1.to_fasta(terminal_character=""), ">\nGATTACA")
-        self.assertEqual(self.b2.to_fasta(),
-                         ">test-seq-2 A test sequence\nACCGGTACC\n")
-        self.assertEqual(self.b3.to_fasta(),
-                         ">test-seq-3 A protein sequence\nGREG\n")
-        self.assertEqual(self.b4.to_fasta(),
-                         ">test-seq-4\nPRTEIN\n")
-        self.assertEqual(self.b5.to_fasta(),
-                         "> some description\nLLPRTEIN\n")
-
-        # alt parameters
-        self.assertEqual(self.b2.to_fasta(field_delimiter=":"),
-                         ">test-seq-2:A test sequence\nACCGGTACC\n")
-        self.assertEqual(self.b2.to_fasta(terminal_character="!"),
-                         ">test-seq-2 A test sequence\nACCGGTACC!")
-        self.assertEqual(
-            self.b2.to_fasta(field_delimiter=":", terminal_character="!"),
-            ">test-seq-2:A test sequence\nACCGGTACC!")
-
-    def test_upper(self):
-        b = NucleotideSequence('GAt.ACa-', id='x', description='42',
-                               quality=range(8))
-        expected = NucleotideSequence('GAT.ACA-', id='x',
-                                      description='42', quality=range(8))
-        # use equals method to ensure that id, description, and quality are
-        # correctly propagated to the resulting sequence
-        self.assertTrue(b.upper().equals(expected))
-
-    def test_lower(self):
-        b = NucleotideSequence('GAt.ACa-', id='x', description='42',
-                               quality=range(8))
-        expected = NucleotideSequence('gat.aca-', id='x',
-                                      description='42', quality=range(8))
-        # use equals method to ensure that id, description, and quality are
-        # correctly propagated to the resulting sequence
-        self.assertTrue(b.lower().equals(expected))
-
-    def test_regex_iter(self):
-        pat = re_compile('(T+A)(CA)')
-
-        obs = list(self.b1.regex_iter(pat))
-        exp = [(2, 5, 'TTA'), (5, 7, 'CA')]
-        self.assertEqual(obs, exp)
+        seq1 = Sequence("12345678")
+        seq2 = Sequence("1234")
+        result = seq1.distance(seq2, metric=metric)
+        self.assertIsInstance(result, float)
+        self.assertEqual(result, 80.0)
 
-        obs = list(self.b1.regex_iter(pat, retrieve_group_0=True))
-        exp = [(2, 7, 'TTACA'), (2, 5, 'TTA'), (5, 7, 'CA')]
-        self.assertEqual(obs, exp)
+    def test_distance_default_metric(self):
+        seq1 = Sequence("abcdef")
+        seq2 = Sequence("12bcef")
+        seq_wrong = Sequence("abcdefghijklmnop")
 
+        self.assertIsInstance(seq1.distance(seq1), float)
+        self.assertEqual(seq1.distance(seq1), 0.0)
+        self.assertEqual(seq1.distance(seq2), 2.0/3.0)
 
-class NucelotideSequenceTests(TestCase):
+        with self.assertRaises(ValueError):
+            seq1.distance(seq_wrong)
 
-    def setUp(self):
-        self.empty = NucleotideSequence('')
-        self.b1 = NucleotideSequence('GATTACA')
-        self.b2 = NucleotideSequence(
-            'ACCGGUACC', id="test-seq-2",
-            description="A test sequence")
-        self.b3 = NucleotideSequence('G-AT-TG.AT.T')
-
-    def test_alphabet(self):
-        exp = {
-            'A', 'C', 'B', 'D', 'G', 'H', 'K', 'M', 'N', 'S', 'R', 'U', 'T',
-            'W', 'V', 'Y', 'a', 'c', 'b', 'd', 'g', 'h', 'k', 'm', 'n', 's',
-            'r', 'u', 't', 'w', 'v', 'y'
-        }
-
-        # Test calling from an instance and purely static context.
-        self.assertEqual(self.b1.alphabet(), exp)
-        self.assertEqual(NucleotideSequence.alphabet(), exp)
-
-    def test_gap_alphabet(self):
-        self.assertEqual(self.b1.gap_alphabet(), set('-.'))
-
-    def test_complement_map(self):
-        exp = {}
-        self.assertEqual(self.b1.complement_map(), exp)
-        self.assertEqual(NucleotideSequence.complement_map(), exp)
-
-    def test_iupac_standard_characters(self):
-        exp = set("ACGTUacgtu")
-        self.assertEqual(self.b1.iupac_standard_characters(), exp)
-        self.assertEqual(NucleotideSequence.iupac_standard_characters(), exp)
-
-    def test_iupac_degeneracies(self):
-        exp = {
-            # upper
-            'B': set(['C', 'U', 'T', 'G']), 'D': set(['A', 'U', 'T', 'G']),
-            'H': set(['A', 'C', 'U', 'T']), 'K': set(['U', 'T', 'G']),
-            'M': set(['A', 'C']), 'N': set(['A', 'C', 'U', 'T', 'G']),
-            'S': set(['C', 'G']), 'R': set(['A', 'G']),
-            'W': set(['A', 'U', 'T']), 'V': set(['A', 'C', 'G']),
-            'Y': set(['C', 'U', 'T']),
-            # lower
-            'b': set(['c', 'u', 't', 'g']), 'd': set(['a', 'u', 't', 'g']),
-            'h': set(['a', 'c', 'u', 't']), 'k': set(['u', 't', 'g']),
-            'm': set(['a', 'c']), 'n': set(['a', 'c', 'u', 't', 'g']),
-            's': set(['c', 'g']), 'r': set(['a', 'g']),
-            'w': set(['a', 'u', 't']), 'v': set(['a', 'c', 'g']),
-            'y': set(['c', 'u', 't'])
-        }
-        self.assertEqual(self.b1.iupac_degeneracies(), exp)
-        self.assertEqual(NucleotideSequence.iupac_degeneracies(), exp)
-
-        # Test that we can modify a copy of the mapping without altering the
-        # canonical representation.
-        degen = NucleotideSequence.iupac_degeneracies()
-        degen.update({'V': set("BRO"), 'Z': set("ZORRO")})
-        self.assertNotEqual(degen, exp)
-        self.assertEqual(NucleotideSequence.iupac_degeneracies(), exp)
-
-    def test_iupac_degenerate_characters(self):
-        exp = set(['B', 'D', 'H', 'K', 'M', 'N', 'S', 'R', 'W', 'V', 'Y',
-                   'b', 'd', 'h', 'k', 'm', 'n', 's', 'r', 'w', 'v', 'y'])
-        self.assertEqual(self.b1.iupac_degenerate_characters(), exp)
-        self.assertEqual(NucleotideSequence.iupac_degenerate_characters(), exp)
-
-    def test_iupac_characters(self):
-        exp = {
-            'A', 'C', 'B', 'D', 'G', 'H', 'K', 'M', 'N', 'S', 'R', 'U', 'T',
-            'W', 'V', 'Y', 'a', 'c', 'b', 'd', 'g', 'h', 'k', 'm', 'n', 's',
-            'r', 'u', 't', 'w', 'v', 'y'
-        }
-
-        self.assertEqual(self.b1.iupac_characters(), exp)
-        self.assertEqual(NucleotideSequence.iupac_characters(), exp)
-
-    def test_complement(self):
-        self.assertRaises(BiologicalSequenceError,
-                          self.b1.complement)
-
-    def test_reverse_complement(self):
-        self.assertRaises(BiologicalSequenceError,
-                          self.b1.reverse_complement)
-
-    def test_is_reverse_complement(self):
-        self.assertRaises(BiologicalSequenceError,
-                          self.b1.is_reverse_complement, self.b1)
-
-    def test_nondegenerates_invalid(self):
-        with self.assertRaises(BiologicalSequenceError):
-            list(NucleotideSequence('AZA').nondegenerates())
-
-    def test_nondegenerates_empty(self):
-        self.assertEqual(list(self.empty.nondegenerates()), [self.empty])
-
-    def test_nondegenerates_no_degens(self):
-        self.assertEqual(list(self.b1.nondegenerates()), [self.b1])
-
-    def test_nondegenerates_all_degens(self):
-        # Same chars.
-        exp = [NucleotideSequence('CC'), NucleotideSequence('CG'),
-               NucleotideSequence('GC'), NucleotideSequence('GG')]
-        # Sort based on sequence string, as order is not guaranteed.
-        obs = sorted(NucleotideSequence('SS').nondegenerates(), key=str)
-        self.assertEqual(obs, exp)
+        with self.assertRaises(ValueError):
+            seq_wrong.distance(seq1)
 
-        # Different chars.
-        exp = [NucleotideSequence('AC'), NucleotideSequence('AG'),
-               NucleotideSequence('GC'), NucleotideSequence('GG')]
-        obs = sorted(NucleotideSequence('RS').nondegenerates(), key=str)
-        self.assertEqual(obs, exp)
+    def test_distance_on_subclass(self):
+        seq1 = Sequence("abcdef")
+        seq2 = SequenceSubclass("12bcef")
 
-        # Odd number of chars.
-        obs = list(NucleotideSequence('NNN').nondegenerates())
-        self.assertEqual(len(obs), 5**3)
+        with self.assertRaises(TypeError):
+            seq1.distance(seq2)
 
-    def test_nondegenerates_mixed_degens(self):
-        exp = [NucleotideSequence('AGC'), NucleotideSequence('AGT'),
-               NucleotideSequence('AGU'), NucleotideSequence('GGC'),
-               NucleotideSequence('GGT'), NucleotideSequence('GGU')]
-        obs = sorted(NucleotideSequence('RGY').nondegenerates(), key=str)
-        self.assertEqual(obs, exp)
+    def test_matches(self):
+        tested = 0
+        for constructor in self.sequence_kinds:
+            tested += 1
+            seq1 = Sequence("AACCEEGG")
+            seq2 = constructor("ABCDEFGH")
+            expected = np.array([True, False] * 4)
+            npt.assert_equal(seq1.matches(seq2), expected)
 
-    def test_nondegenerates_gap_mixed_case(self):
-        exp = [NucleotideSequence('-A.a'), NucleotideSequence('-A.c'),
-               NucleotideSequence('-C.a'), NucleotideSequence('-C.c')]
-        obs = sorted(NucleotideSequence('-M.m').nondegenerates(), key=str)
-        self.assertEqual(obs, exp)
+        self.assertEqual(tested, 4)
 
-    def test_find_features(self):
-        exp = [(0, 2, 'GA'), (4, 5, 'A'), (6, 7, 'A')]
-        obs = list(self.b1.find_features('purine_run'))
-        self.assertEqual(obs, exp)
+    def test_matches_on_subclass(self):
+        seq1 = Sequence("AACCEEGG")
+        seq2 = SequenceSubclass("ABCDEFGH")
 
-        exp = [(2, 4, 'TT'), (5, 6, 'C')]
-        obs = list(self.b1.find_features('pyrimidine_run'))
-        self.assertEqual(obs, exp)
+        with self.assertRaises(TypeError):
+            seq1.matches(seq2)
 
-        exp = [(0, 1, 'A'), (3, 5, 'GG'), (6, 7, 'A')]
-        obs = list(self.b2.find_features('purine_run'))
-        self.assertEqual(obs, exp)
+    def test_matches_unequal_length(self):
+        seq1 = Sequence("AACCEEGG")
+        seq2 = Sequence("TOOLONGTOCOMPARE")
 
-        exp = [(1, 3, 'CC'), (5, 6, 'U'), (7, 9, 'CC')]
-        obs = list(self.b2.find_features('pyrimidine_run'))
-        self.assertEqual(obs, exp)
+        with self.assertRaises(ValueError):
+            seq1.matches(seq2)
 
-    def test_find_features_min_length(self):
-        exp = [(0, 2, 'GA')]
-        obs = list(self.b1.find_features('purine_run', 2))
-        self.assertEqual(obs, exp)
+    def test_mismatches(self):
+        tested = 0
+        for constructor in self.sequence_kinds:
+            tested += 1
+            seq1 = Sequence("AACCEEGG")
+            seq2 = constructor("ABCDEFGH")
+            expected = np.array([False, True] * 4)
+            npt.assert_equal(seq1.mismatches(seq2), expected)
 
-        exp = [(2, 4, 'TT')]
-        obs = list(self.b1.find_features('pyrimidine_run', 2))
-        self.assertEqual(obs, exp)
+        self.assertEqual(tested, 4)
 
-        exp = [(3, 5, 'GG')]
-        obs = list(self.b2.find_features('purine_run', 2))
-        self.assertEqual(obs, exp)
+    def test_mismatches_on_subclass(self):
+        seq1 = Sequence("AACCEEGG")
+        seq2 = SequenceSubclass("ABCDEFGH")
 
-        exp = [(1, 3, 'CC'), (7, 9, 'CC')]
-        obs = list(self.b2.find_features('pyrimidine_run', 2))
-        self.assertEqual(obs, exp)
+        with self.assertRaises(TypeError):
+            seq1.mismatches(seq2)
+
+    def test_mismatches_unequal_length(self):
+        seq1 = Sequence("AACCEEGG")
+        seq2 = Sequence("TOOLONGTOCOMPARE")
 
-    def test_find_features_no_feature_type(self):
         with self.assertRaises(ValueError):
-            list(self.b1.find_features('nonexistent_feature_type'))
+            seq1.mismatches(seq2)
 
-    def test_find_features_allow_gaps(self):
-        exp = [(0, 3, 'G-A'), (6, 9, 'G.A')]
-        obs = list(self.b3.find_features('purine_run', 2, True))
-        self.assertEqual(obs, exp)
+    def test_mismatch_frequency(self):
+        seq1 = Sequence("AACCEEGG")
+        seq2 = Sequence("ABCDEFGH")
+        seq3 = Sequence("TTTTTTTT")
 
-        exp = [(3, 6, 'T-T'), (9, 12, 'T.T')]
-        obs = list(self.b3.find_features('pyrimidine_run', 2, True))
-        self.assertEqual(obs, exp)
+        self.assertIs(type(seq1.mismatch_frequency(seq1)), int)
+        self.assertEqual(seq1.mismatch_frequency(seq1), 0)
+        self.assertEqual(seq1.mismatch_frequency(seq2), 4)
+        self.assertEqual(seq1.mismatch_frequency(seq3), 8)
+
+    def test_mismatch_frequency_relative(self):
+        seq1 = Sequence("AACCEEGG")
+        seq2 = Sequence("ABCDEFGH")
+        seq3 = Sequence("TTTTTTTT")
+
+        self.assertIs(type(seq1.mismatch_frequency(seq1, relative=True)),
+                      float)
+        self.assertEqual(seq1.mismatch_frequency(seq1, relative=True), 0.0)
+        self.assertEqual(seq1.mismatch_frequency(seq2, relative=True), 0.5)
+        self.assertEqual(seq1.mismatch_frequency(seq3, relative=True), 1.0)
+
+    def test_mismatch_frequency_unequal_length(self):
+        seq1 = Sequence("AACCEEGG")
+        seq2 = Sequence("TOOLONGTOCOMPARE")
+
+        with self.assertRaises(ValueError):
+            seq1.mismatch_frequency(seq2)
+
+    def test_mismatch_frequency_on_subclass(self):
+        seq1 = Sequence("AACCEEGG")
+        seq2 = SequenceSubclass("ABCDEFGH")
+
+        with self.assertRaises(TypeError):
+            seq1.mismatch_frequency(seq2)
+
+    def test_match_frequency(self):
+        seq1 = Sequence("AACCEEGG")
+        seq2 = Sequence("ABCDEFGH")
+        seq3 = Sequence("TTTTTTTT")
+
+        self.assertIs(type(seq1.match_frequency(seq1)), int)
+        self.assertEqual(seq1.match_frequency(seq1), 8)
+        self.assertEqual(seq1.match_frequency(seq2), 4)
+        self.assertEqual(seq1.match_frequency(seq3), 0)
+
+    def test_match_frequency_relative(self):
+        seq1 = Sequence("AACCEEGG")
+        seq2 = Sequence("ABCDEFGH")
+        seq3 = Sequence("TTTTTTTT")
+
+        self.assertIs(type(seq1.match_frequency(seq1, relative=True)),
+                      float)
+        self.assertEqual(seq1.match_frequency(seq1, relative=True), 1.0)
+        self.assertEqual(seq1.match_frequency(seq2, relative=True), 0.5)
+        self.assertEqual(seq1.match_frequency(seq3, relative=True), 0.0)
+
+    def test_match_frequency_unequal_length(self):
+        seq1 = Sequence("AACCEEGG")
+        seq2 = Sequence("TOOLONGTOCOMPARE")
+
+        with self.assertRaises(ValueError):
+            seq1.match_frequency(seq2)
+
+    def test_match_frequency_on_subclass(self):
+        seq1 = Sequence("AACCEEGG")
+        seq2 = SequenceSubclass("ABCDEFGH")
+
+        with self.assertRaises(TypeError):
+            seq1.match_frequency(seq2)
+
+    def test_index(self):
+        tested = 0
+        for c in self.sequence_kinds:
+            tested += 1
+            seq = Sequence("ABCDEFG@@ABCDFOO")
+            self.assertEqual(seq.index(c("A")), 0)
+            self.assertEqual(seq.index(c("@")), 7)
+            self.assertEqual(seq.index(c("@@")), 7)
+
+            with self.assertRaises(ValueError):
+                seq.index("A", start=1, end=5)
+
+        self.assertEqual(tested, 4)
+
+    def test_index_on_subclass(self):
+        with self.assertRaises(TypeError):
+            Sequence("ABCDEFG").index(SequenceSubclass("A"))
+
+        self.assertEqual(
+            SequenceSubclass("ABCDEFG").index(SequenceSubclass("A")), 0)
+
+    def _compare_kmers_results(self, observed, expected):
+        for obs, exp in zip_longest(observed, expected, fillvalue=None):
+            self.assertEqual(obs, exp)
+
+    def test_iter_kmers(self):
+        seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
+
+        expected = [
+            Sequence('G', positional_metadata={'quality': [0]}),
+            Sequence('A', positional_metadata={'quality': [1]}),
+            Sequence('T', positional_metadata={'quality': [2]}),
+            Sequence('T', positional_metadata={'quality': [3]}),
+            Sequence('A', positional_metadata={'quality': [4]}),
+            Sequence('C', positional_metadata={'quality': [5]}),
+            Sequence('A', positional_metadata={'quality': [6]})
+        ]
+        self._compare_kmers_results(
+            seq.iter_kmers(1, overlap=False), expected)
 
-    def test_nondegenerates_propagate_optional_properties(self):
-        seq = NucleotideSequence('RS', id='foo', description='bar',
-                                 quality=[42, 999])
-
-        exp = [
-            NucleotideSequence('AC', id='foo', description='bar',
-                               quality=[42, 999]),
-            NucleotideSequence('AG', id='foo', description='bar',
-                               quality=[42, 999]),
-            NucleotideSequence('GC', id='foo', description='bar',
-                               quality=[42, 999]),
-            NucleotideSequence('GG', id='foo', description='bar',
-                               quality=[42, 999])
+        expected = [
+            Sequence('GA', positional_metadata={'quality': [0, 1]}),
+            Sequence('TT', positional_metadata={'quality': [2, 3]}),
+            Sequence('AC', positional_metadata={'quality': [4, 5]})
         ]
+        self._compare_kmers_results(
+            seq.iter_kmers(2, overlap=False), expected)
 
-        obs = sorted(seq.nondegenerates(), key=str)
+        expected = [
+            Sequence('GAT', positional_metadata={'quality': [0, 1, 2]}),
+            Sequence('TAC', positional_metadata={'quality': [3, 4, 5]})
+        ]
+        self._compare_kmers_results(
+            seq.iter_kmers(3, overlap=False), expected)
 
-        for o, e in zip(obs, exp):
-            # use equals method to ensure that id, description, and quality are
-            # correctly propagated to the resulting sequence
-            self.assertTrue(o.equals(e))
+        expected = [
+            Sequence('GATTACA',
+                     positional_metadata={'quality': [0, 1, 2, 3, 4, 5, 6]})
+        ]
+        self._compare_kmers_results(
+            seq.iter_kmers(7, overlap=False), expected)
 
+        expected = []
+        self._compare_kmers_results(
+            seq.iter_kmers(8, overlap=False), expected)
 
-class DNASequenceTests(TestCase):
+        self.assertIs(type(seq.iter_kmers(1)), GeneratorType)
 
-    def setUp(self):
-        self.empty = DNASequence('')
-        self.b1 = DNASequence('GATTACA')
-        self.b2 = DNASequence('ACCGGTACC', id="test-seq-2",
-                              description="A test sequence", quality=range(9))
-        self.b3 = DNASequence(
-            'ACCGGUACC', id="bad-seq-1",
-            description="Not a DNA sequence")
-        self.b4 = DNASequence(
-            'MRWSYKVHDBN', id="degen",
-            description="All of the degenerate bases")
-        self.b5 = DNASequence('.G--ATTAC-A...')
-
-    def test_alphabet(self):
-        exp = {
-            'A', 'C', 'B', 'D', 'G', 'H', 'K', 'M', 'N', 'S', 'R', 'T', 'W',
-            'V', 'Y', 'a', 'c', 'b', 'd', 'g', 'h', 'k', 'm', 'n', 's', 'r',
-            't', 'w', 'v', 'y'
-        }
-
-        self.assertEqual(self.b1.alphabet(), exp)
-        self.assertEqual(DNASequence.alphabet(), exp)
-
-    def test_gap_alphabet(self):
-        self.assertEqual(self.b1.gap_alphabet(), set('-.'))
-
-    def test_complement_map(self):
-        exp = {
-            '-': '-', '.': '.', 'A': 'T', 'C': 'G', 'B': 'V', 'D': 'H',
-            'G': 'C', 'H': 'D', 'K': 'M', 'M': 'K', 'N': 'N', 'S': 'S',
-            'R': 'Y', 'T': 'A', 'W': 'W', 'V': 'B', 'Y': 'R', 'a': 't',
-            'c': 'g', 'b': 'v', 'd': 'h', 'g': 'c', 'h': 'd', 'k': 'm',
-            'm': 'k', 'n': 'n', 's': 's', 'r': 'y', 't': 'a', 'w': 'w',
-            'v': 'b', 'y': 'r'
-        }
-        self.assertEqual(self.b1.complement_map(), exp)
-        self.assertEqual(DNASequence.complement_map(), exp)
-
-    def test_iupac_standard_characters(self):
-        exp = set("ACGTacgt")
-        self.assertEqual(self.b1.iupac_standard_characters(), exp)
-        self.assertEqual(DNASequence.iupac_standard_characters(), exp)
-
-    def test_iupac_degeneracies(self):
-        exp = {
-            'B': set(['C', 'T', 'G']), 'D': set(['A', 'T', 'G']),
-            'H': set(['A', 'C', 'T']), 'K': set(['T', 'G']),
-            'M': set(['A', 'C']), 'N': set(['A', 'C', 'T', 'G']),
-            'S': set(['C', 'G']), 'R': set(['A', 'G']), 'W': set(['A', 'T']),
-            'V': set(['A', 'C', 'G']), 'Y': set(['C', 'T']),
-            'b': set(['c', 't', 'g']), 'd': set(['a', 't', 'g']),
-            'h': set(['a', 'c', 't']), 'k': set(['t', 'g']),
-            'm': set(['a', 'c']), 'n': set(['a', 'c', 't', 'g']),
-            's': set(['c', 'g']), 'r': set(['a', 'g']), 'w': set(['a', 't']),
-            'v': set(['a', 'c', 'g']), 'y': set(['c', 't'])
-        }
-        self.assertEqual(self.b1.iupac_degeneracies(), exp)
-        self.assertEqual(DNASequence.iupac_degeneracies(), exp)
-
-    def test_iupac_degenerate_characters(self):
-        exp = set(['B', 'D', 'H', 'K', 'M', 'N', 'S', 'R', 'W', 'V', 'Y',
-                   'b', 'd', 'h', 'k', 'm', 'n', 's', 'r', 'w', 'v', 'y'])
-        self.assertEqual(self.b1.iupac_degenerate_characters(), exp)
-        self.assertEqual(DNASequence.iupac_degenerate_characters(), exp)
-
-    def test_iupac_characters(self):
-        exp = {
-            'A', 'C', 'B', 'D', 'G', 'H', 'K', 'M', 'N', 'S', 'R', 'T', 'W',
-            'V', 'Y', 'a', 'c', 'b', 'd', 'g', 'h', 'k', 'm', 'n', 's', 'r',
-            't', 'w', 'v', 'y'
-        }
-        self.assertEqual(self.b1.iupac_characters(), exp)
-        self.assertEqual(DNASequence.iupac_characters(), exp)
-
-    def test_complement(self):
-        # use equals method to ensure that id, description, and quality are
-        # correctly propagated to the resulting sequence
-        self.assertTrue(self.b1.complement().equals(DNASequence("CTAATGT")))
-
-        self.assertTrue(self.b2.complement().equals(
-            DNASequence("TGGCCATGG", id="test-seq-2",
-                        description="A test sequence", quality=range(9))))
-
-        self.assertRaises(BiologicalSequenceError, self.b3.complement)
-
-        self.assertTrue(self.b4.complement().equals(
-            DNASequence("KYWSRMBDHVN", id="degen",
-                        description="All of the degenerate bases")))
-
-        self.assertTrue(self.b5.complement().equals(
-            DNASequence(".C--TAATG-T...")))
-
-    def test_reverse_complement(self):
-        # use equals method to ensure that id, description, and (reversed)
-        # quality scores are correctly propagated to the resulting sequence
-        self.assertTrue(self.b1.reverse_complement().equals(
-            DNASequence("TGTAATC")))
-
-        self.assertTrue(self.b2.reverse_complement().equals(
-            DNASequence("GGTACCGGT", id="test-seq-2",
-                        description="A test sequence",
-                        quality=range(9)[::-1])))
-
-        self.assertRaises(BiologicalSequenceError, self.b3.reverse_complement)
-
-        self.assertTrue(self.b4.reverse_complement().equals(
-            DNASequence("NVHDBMRSWYK", id="degen",
-                        description="All of the degenerate bases")))
-
-    def test_unsupported_characters(self):
-        self.assertEqual(self.b1.unsupported_characters(), set())
-        self.assertEqual(self.b2.unsupported_characters(), set())
-        self.assertEqual(self.b3.unsupported_characters(), set('U'))
-        self.assertEqual(self.b4.unsupported_characters(), set())
-
-    def test_has_unsupported_characters(self):
-        self.assertFalse(self.b1.has_unsupported_characters())
-        self.assertFalse(self.b2.has_unsupported_characters())
-        self.assertTrue(self.b3.has_unsupported_characters())
-        self.assertFalse(self.b4.has_unsupported_characters())
-
-    def test_is_reverse_complement(self):
-        self.assertFalse(self.b1.is_reverse_complement(self.b1))
-
-        # id, description, and quality scores should be ignored (only sequence
-        # data and type should be compared)
-        self.assertTrue(self.b1.is_reverse_complement(
-            DNASequence('TGTAATC', quality=range(7))))
+    def test_iter_kmers_no_positional_metadata(self):
+        seq = Sequence('GATTACA')
 
-        self.assertTrue(
-            self.b4.is_reverse_complement(DNASequence('NVHDBMRSWYK')))
+        expected = [
+            Sequence('G'),
+            Sequence('A'),
+            Sequence('T'),
+            Sequence('T'),
+            Sequence('A'),
+            Sequence('C'),
+            Sequence('A')
+        ]
+        self._compare_kmers_results(
+            seq.iter_kmers(1, overlap=False), expected)
 
-    def test_nondegenerates_invalid(self):
-        with self.assertRaises(BiologicalSequenceError):
-            list(DNASequence('AZA').nondegenerates())
+        expected = [
+            Sequence('GA'),
+            Sequence('TT'),
+            Sequence('AC')
+        ]
+        self._compare_kmers_results(
+            seq.iter_kmers(2, overlap=False), expected)
 
-    def test_nondegenerates_empty(self):
-        self.assertEqual(list(self.empty.nondegenerates()), [self.empty])
+        expected = [
+            Sequence('GAT'),
+            Sequence('TAC')
+        ]
+        self._compare_kmers_results(
+            seq.iter_kmers(3, overlap=False), expected)
 
-    def test_nondegenerates_no_degens(self):
-        self.assertEqual(list(self.b1.nondegenerates()), [self.b1])
+        expected = [
+            Sequence('GATTACA')
+        ]
+        self._compare_kmers_results(
+            seq.iter_kmers(7, overlap=False), expected)
 
-    def test_nondegenerates_all_degens(self):
-        # Same chars.
-        exp = [DNASequence('CC'), DNASequence('CG'), DNASequence('GC'),
-               DNASequence('GG')]
-        # Sort based on sequence string, as order is not guaranteed.
-        obs = sorted(DNASequence('SS').nondegenerates(), key=str)
-        self.assertEqual(obs, exp)
+        expected = []
+        self._compare_kmers_results(
+            seq.iter_kmers(8, overlap=False), expected)
 
-        # Different chars.
-        exp = [DNASequence('AC'), DNASequence('AG'), DNASequence('GC'),
-               DNASequence('GG')]
-        obs = sorted(DNASequence('RS').nondegenerates(), key=str)
-        self.assertEqual(obs, exp)
+        self.assertIs(type(seq.iter_kmers(1)), GeneratorType)
 
-        # Odd number of chars.
-        obs = list(DNASequence('NNN').nondegenerates())
-        self.assertEqual(len(obs), 4**3)
+    def test_iter_kmers_with_overlap(self):
+        seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
+        expected = [
+            Sequence('G', positional_metadata={'quality': [0]}),
+            Sequence('A', positional_metadata={'quality': [1]}),
+            Sequence('T', positional_metadata={'quality': [2]}),
+            Sequence('T', positional_metadata={'quality': [3]}),
+            Sequence('A', positional_metadata={'quality': [4]}),
+            Sequence('C', positional_metadata={'quality': [5]}),
+            Sequence('A', positional_metadata={'quality': [6]})
+        ]
+        self._compare_kmers_results(
+            seq.iter_kmers(1, overlap=True), expected)
 
-    def test_nondegenerates_mixed_degens(self):
-        exp = [DNASequence('AGC'), DNASequence('AGT'), DNASequence('GGC'),
-               DNASequence('GGT')]
-        obs = sorted(DNASequence('RGY').nondegenerates(), key=str)
-        self.assertEqual(obs, exp)
+        expected = [
+            Sequence('GA', positional_metadata={'quality': [0, 1]}),
+            Sequence('AT', positional_metadata={'quality': [1, 2]}),
+            Sequence('TT', positional_metadata={'quality': [2, 3]}),
+            Sequence('TA', positional_metadata={'quality': [3, 4]}),
+            Sequence('AC', positional_metadata={'quality': [4, 5]}),
+            Sequence('CA', positional_metadata={'quality': [5, 6]})
+        ]
+        self._compare_kmers_results(
+            seq.iter_kmers(2, overlap=True), expected)
 
-    def test_nondegenerates_gap_mixed_case(self):
-        exp = [DNASequence('-A.a'), DNASequence('-A.c'),
-               DNASequence('-C.a'), DNASequence('-C.c')]
-        obs = sorted(DNASequence('-M.m').nondegenerates(), key=str)
-        self.assertEqual(obs, exp)
+        expected = [
+            Sequence('GAT', positional_metadata={'quality': [0, 1, 2]}),
+            Sequence('ATT', positional_metadata={'quality': [1, 2, 3]}),
+            Sequence('TTA', positional_metadata={'quality': [2, 3, 4]}),
+            Sequence('TAC', positional_metadata={'quality': [3, 4, 5]}),
+            Sequence('ACA', positional_metadata={'quality': [4, 5, 6]})
+        ]
+        self._compare_kmers_results(
+            seq.iter_kmers(3, overlap=True), expected)
 
+        expected = [
+            Sequence('GATTACA',
+                     positional_metadata={'quality': [0, 1, 2, 3, 4, 5, 6]})
+        ]
+        self._compare_kmers_results(
+            seq.iter_kmers(7, overlap=True), expected)
 
-class RNASequenceTests(TestCase):
+        expected = []
+        self._compare_kmers_results(
+            seq.iter_kmers(8, overlap=True), expected)
 
-    def setUp(self):
-        self.empty = RNASequence('')
-        self.b1 = RNASequence('GAUUACA')
-        self.b2 = RNASequence('ACCGGUACC', id="test-seq-2",
-                              description="A test sequence", quality=range(9))
-        self.b3 = RNASequence(
-            'ACCGGTACC', id="bad-seq-1",
-            description="Not a RNA sequence")
-        self.b4 = RNASequence(
-            'MRWSYKVHDBN', id="degen",
-            description="All of the degenerate bases")
-        self.b5 = RNASequence('.G--AUUAC-A...')
-
-    def test_alphabet(self):
-        exp = {
-            'A', 'C', 'B', 'D', 'G', 'H', 'K', 'M', 'N', 'S', 'R', 'U', 'W',
-            'V', 'Y', 'a', 'c', 'b', 'd', 'g', 'h', 'k', 'm', 'n', 's', 'r',
-            'u', 'w', 'v', 'y'
-        }
-
-        self.assertEqual(self.b1.alphabet(), exp)
-        self.assertEqual(RNASequence.alphabet(), exp)
-
-    def test_gap_alphabet(self):
-        self.assertEqual(self.b1.gap_alphabet(), set('-.'))
-
-    def test_complement_map(self):
-        exp = {
-            '-': '-', '.': '.', 'A': 'U', 'C': 'G', 'B': 'V', 'D': 'H',
-            'G': 'C', 'H': 'D', 'K': 'M', 'M': 'K', 'N': 'N', 'S': 'S',
-            'R': 'Y', 'U': 'A', 'W': 'W', 'V': 'B', 'Y': 'R', 'a': 'u',
-            'c': 'g', 'b': 'v', 'd': 'h', 'g': 'c', 'h': 'd', 'k': 'm',
-            'm': 'k', 'n': 'n', 's': 's', 'r': 'y', 'u': 'a', 'w': 'w',
-            'v': 'b', 'y': 'r'
-        }
-        self.assertEqual(self.b1.complement_map(), exp)
-        self.assertEqual(RNASequence.complement_map(), exp)
-
-    def test_iupac_standard_characters(self):
-        exp = set("ACGUacgu")
-        self.assertEqual(self.b1.iupac_standard_characters(), exp)
-        self.assertEqual(RNASequence.iupac_standard_characters(), exp)
-
-    def test_iupac_degeneracies(self):
-        exp = {
-            'B': set(['C', 'U', 'G']), 'D': set(['A', 'U', 'G']),
-            'H': set(['A', 'C', 'U']), 'K': set(['U', 'G']),
-            'M': set(['A', 'C']), 'N': set(['A', 'C', 'U', 'G']),
-            'S': set(['C', 'G']), 'R': set(['A', 'G']), 'W': set(['A', 'U']),
-            'V': set(['A', 'C', 'G']), 'Y': set(['C', 'U']),
-            'b': set(['c', 'u', 'g']), 'd': set(['a', 'u', 'g']),
-            'h': set(['a', 'c', 'u']), 'k': set(['u', 'g']),
-            'm': set(['a', 'c']), 'n': set(['a', 'c', 'u', 'g']),
-            's': set(['c', 'g']), 'r': set(['a', 'g']), 'w': set(['a', 'u']),
-            'v': set(['a', 'c', 'g']), 'y': set(['c', 'u'])
-        }
-        self.assertEqual(self.b1.iupac_degeneracies(), exp)
-        self.assertEqual(RNASequence.iupac_degeneracies(), exp)
-
-    def test_iupac_degenerate_characters(self):
-        exp = set(['B', 'D', 'H', 'K', 'M', 'N', 'S', 'R', 'W', 'V', 'Y',
-                   'b', 'd', 'h', 'k', 'm', 'n', 's', 'r', 'w', 'v', 'y'])
-        self.assertEqual(self.b1.iupac_degenerate_characters(), exp)
-        self.assertEqual(RNASequence.iupac_degenerate_characters(), exp)
-
-    def test_iupac_characters(self):
-        exp = {
-            'A', 'C', 'B', 'D', 'G', 'H', 'K', 'M', 'N', 'S', 'R', 'U', 'W',
-            'V', 'Y', 'a', 'c', 'b', 'd', 'g', 'h', 'k', 'm', 'n', 's', 'r',
-            'u', 'w', 'v', 'y'
-        }
-        self.assertEqual(self.b1.iupac_characters(), exp)
-        self.assertEqual(RNASequence.iupac_characters(), exp)
-
-    def test_complement(self):
-        # use equals method to ensure that id, description, and quality are
-        # correctly propagated to the resulting sequence
-        self.assertTrue(self.b1.complement().equals(RNASequence("CUAAUGU")))
-
-        self.assertTrue(self.b2.complement().equals(
-            RNASequence("UGGCCAUGG", id="test-seq-2",
-                        description="A test sequence", quality=range(9))))
-
-        self.assertRaises(BiologicalSequenceError, self.b3.complement)
-
-        self.assertTrue(self.b4.complement().equals(
-            RNASequence("KYWSRMBDHVN", id="degen",
-                        description="All of the degenerate bases")))
-
-        self.assertTrue(self.b5.complement().equals(
-            RNASequence(".C--UAAUG-U...")))
-
-    def test_reverse_complement(self):
-        # use equals method to ensure that id, description, and (reversed)
-        # quality scores are correctly propagated to the resulting sequence
-        self.assertTrue(self.b1.reverse_complement().equals(
-            RNASequence("UGUAAUC")))
-
-        self.assertTrue(self.b2.reverse_complement().equals(
-            RNASequence("GGUACCGGU", id="test-seq-2",
-                        description="A test sequence",
-                        quality=range(9)[::-1])))
-
-        self.assertRaises(BiologicalSequenceError, self.b3.reverse_complement)
-
-        self.assertTrue(self.b4.reverse_complement().equals(
-            RNASequence("NVHDBMRSWYK", id="degen",
-                        description="All of the degenerate bases")))
-
-    def test_unsupported_characters(self):
-        self.assertEqual(self.b1.unsupported_characters(), set())
-        self.assertEqual(self.b2.unsupported_characters(), set())
-        self.assertEqual(self.b3.unsupported_characters(), set('T'))
-        self.assertEqual(self.b4.unsupported_characters(), set())
-
-    def test_has_unsupported_characters(self):
-        self.assertFalse(self.b1.has_unsupported_characters())
-        self.assertFalse(self.b2.has_unsupported_characters())
-        self.assertTrue(self.b3.has_unsupported_characters())
-        self.assertFalse(self.b4.has_unsupported_characters())
-
-    def test_is_reverse_complement(self):
-        self.assertFalse(self.b1.is_reverse_complement(self.b1))
-
-        # id, description, and quality scores should be ignored (only sequence
-        # data and type should be compared)
-        self.assertTrue(self.b1.is_reverse_complement(
-            RNASequence('UGUAAUC', quality=range(7))))
+        self.assertIs(type(seq.iter_kmers(1)), GeneratorType)
 
-        self.assertTrue(
-            self.b4.is_reverse_complement(RNASequence('NVHDBMRSWYK')))
+    def test_iter_kmers_with_overlap_no_positional_metadata(self):
+        seq = Sequence('GATTACA')
+        expected = [
+            Sequence('G'),
+            Sequence('A'),
+            Sequence('T'),
+            Sequence('T'),
+            Sequence('A'),
+            Sequence('C'),
+            Sequence('A')
+        ]
+        self._compare_kmers_results(
+            seq.iter_kmers(1, overlap=True), expected)
+
+        expected = [
+            Sequence('GA'),
+            Sequence('AT'),
+            Sequence('TT'),
+            Sequence('TA'),
+            Sequence('AC'),
+            Sequence('CA')
+        ]
+        self._compare_kmers_results(
+            seq.iter_kmers(2, overlap=True), expected)
 
-    def test_nondegenerates_invalid(self):
-        with self.assertRaises(BiologicalSequenceError):
-            list(RNASequence('AZA').nondegenerates())
+        expected = [
+            Sequence('GAT'),
+            Sequence('ATT'),
+            Sequence('TTA'),
+            Sequence('TAC'),
+            Sequence('ACA')
+        ]
+        self._compare_kmers_results(
+            seq.iter_kmers(3, overlap=True), expected)
 
-    def test_nondegenerates_empty(self):
-        self.assertEqual(list(self.empty.nondegenerates()), [self.empty])
+        expected = [
+            Sequence('GATTACA')
+        ]
+        self._compare_kmers_results(
+            seq.iter_kmers(7, overlap=True), expected)
 
-    def test_nondegenerates_no_degens(self):
-        self.assertEqual(list(self.b1.nondegenerates()), [self.b1])
+        expected = []
+        self._compare_kmers_results(
+            seq.iter_kmers(8, overlap=True), expected)
 
-    def test_nondegenerates_all_degens(self):
-        # Same chars.
-        exp = [RNASequence('CC'), RNASequence('CG'), RNASequence('GC'),
-               RNASequence('GG')]
-        # Sort based on sequence string, as order is not guaranteed.
-        obs = sorted(RNASequence('SS').nondegenerates(), key=str)
-        self.assertEqual(obs, exp)
+        self.assertIs(type(seq.iter_kmers(1)), GeneratorType)
 
-        # Different chars.
-        exp = [RNASequence('AC'), RNASequence('AG'), RNASequence('GC'),
-               RNASequence('GG')]
-        obs = sorted(RNASequence('RS').nondegenerates(), key=str)
-        self.assertEqual(obs, exp)
+    def test_iter_kmers_invalid_k(self):
+        seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
 
-        # Odd number of chars.
-        obs = list(RNASequence('NNN').nondegenerates())
-        self.assertEqual(len(obs), 4**3)
+        with self.assertRaises(ValueError):
+            list(seq.iter_kmers(0))
 
-    def test_nondegenerates_mixed_degens(self):
-        exp = [RNASequence('AGC'), RNASequence('AGU'), RNASequence('GGC'),
-               RNASequence('GGU')]
-        obs = sorted(RNASequence('RGY').nondegenerates(), key=str)
-        self.assertEqual(obs, exp)
+        with self.assertRaises(ValueError):
+            list(seq.iter_kmers(-42))
+
+    def test_iter_kmers_invalid_k_no_positional_metadata(self):
+        seq = Sequence('GATTACA')
+
+        with self.assertRaises(ValueError):
+            list(seq.iter_kmers(0))
+
+        with self.assertRaises(ValueError):
+            list(seq.iter_kmers(-42))
+
+    def test_iter_kmers_different_sequences(self):
+        seq = Sequence('HE..--..LLO',
+                       metadata={'id': 'hello', 'desc': 'gapped hello'},
+                       positional_metadata={'quality': range(11)})
+        expected = [
+            Sequence('HE.', positional_metadata={'quality': [0, 1, 2]},
+                     metadata={'id': 'hello', 'desc': 'gapped hello'}),
+            Sequence('.--', positional_metadata={'quality': [3, 4, 5]},
+                     metadata={'id': 'hello', 'desc': 'gapped hello'}),
+            Sequence('..L', positional_metadata={'quality': [6, 7, 8]},
+                     metadata={'id': 'hello', 'desc': 'gapped hello'})
+        ]
+        self._compare_kmers_results(seq.iter_kmers(3, overlap=False), expected)
+
+    def test_iter_kmers_different_sequences_no_positional_metadata(self):
+        seq = Sequence('HE..--..LLO',
+                       metadata={'id': 'hello', 'desc': 'gapped hello'})
+        expected = [
+            Sequence('HE.',
+                     metadata={'id': 'hello', 'desc': 'gapped hello'}),
+            Sequence('.--',
+                     metadata={'id': 'hello', 'desc': 'gapped hello'}),
+            Sequence('..L',
+                     metadata={'id': 'hello', 'desc': 'gapped hello'})
+        ]
+        self._compare_kmers_results(seq.iter_kmers(3, overlap=False), expected)
+
+    def test_kmer_frequencies(self):
+        seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
+        # overlap = True
+        expected = Counter('GATTACA')
+        self.assertEqual(seq.kmer_frequencies(1, overlap=True), expected)
+        expected = Counter(['GAT', 'ATT', 'TTA', 'TAC', 'ACA'])
+        self.assertEqual(seq.kmer_frequencies(3, overlap=True), expected)
+        expected = Counter([])
+        self.assertEqual(seq.kmer_frequencies(8, overlap=True), expected)
+
+        # overlap = False
+        expected = Counter(['GAT', 'TAC'])
+        self.assertEqual(seq.kmer_frequencies(3, overlap=False), expected)
+        expected = Counter(['GATTACA'])
+        self.assertEqual(seq.kmer_frequencies(7, overlap=False), expected)
+        expected = Counter([])
+        self.assertEqual(seq.kmer_frequencies(8, overlap=False), expected)
+
+    def test_kmer_frequencies_relative(self):
+        seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
+        # overlap = True
+        expected = defaultdict(float)
+        expected['A'] = 3/7.
+        expected['C'] = 1/7.
+        expected['G'] = 1/7.
+        expected['T'] = 2/7.
+        self.assertEqual(seq.kmer_frequencies(1, overlap=True, relative=True),
+                         expected)
+        expected = defaultdict(float)
+        expected['GAT'] = 1/5.
+        expected['ATT'] = 1/5.
+        expected['TTA'] = 1/5.
+        expected['TAC'] = 1/5.
+        expected['ACA'] = 1/5.
+        self.assertEqual(seq.kmer_frequencies(3, overlap=True, relative=True),
+                         expected)
+        expected = defaultdict(float)
+        self.assertEqual(seq.kmer_frequencies(8, overlap=True, relative=True),
+                         expected)
+
+        # overlap = False
+        expected = defaultdict(float)
+        expected['GAT'] = 1/2.
+        expected['TAC'] = 1/2.
+        self.assertEqual(seq.kmer_frequencies(3, overlap=False, relative=True),
+                         expected)
+        expected = defaultdict(float)
+        expected['GATTACA'] = 1.0
+        self.assertEqual(seq.kmer_frequencies(7, overlap=False, relative=True),
+                         expected)
+        expected = defaultdict(float)
+        self.assertEqual(seq.kmer_frequencies(8, overlap=False, relative=True),
+                         expected)
+
+    def test_kmer_frequencies_floating_point_precision(self):
+        # Test that a sequence having no variation in k-words yields a
+        # frequency of exactly 1.0. Note that it is important to use
+        # self.assertEqual here instead of self.assertAlmostEqual because we
+        # want to test for exactly 1.0. A previous implementation of
+        # Sequence.kmer_frequencies(relative=True) added (1 / num_words) for
+        # each occurrence of a k-word to compute the frequencies (see
+        # https://github.com/biocore/scikit-bio/issues/801). In certain cases,
+        # this yielded a frequency slightly less than 1.0 due to roundoff
+        # error. The test case here uses a sequence with 10 characters that are
+        # all identical and computes k-word frequencies with k=1. This test
+        # case exposes the roundoff error present in the previous
+        # implementation because there are 10 k-words (which are all
+        # identical), so 1/10 added 10 times yields a number slightly less than
+        # 1.0. This occurs because 1/10 cannot be represented exactly as a
+        # floating point number.
+        seq = Sequence('AAAAAAAAAA')
+        self.assertEqual(seq.kmer_frequencies(1, relative=True),
+                         defaultdict(float, {'A': 1.0}))
+
+    def test_find_with_regex(self):
+        seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
+        pat = re.compile('(T+A)(CA)')
 
-    def test_nondegenerates_gap_mixed_case(self):
-        exp = [RNASequence('-A.a'), RNASequence('-A.c'),
-               RNASequence('-C.a'), RNASequence('-C.c')]
-        obs = sorted(RNASequence('-M.m').nondegenerates(), key=str)
+        obs = list(seq.find_with_regex(pat))
+        exp = [slice(2, 5), slice(5, 7)]
         self.assertEqual(obs, exp)
 
+        self.assertIs(type(seq.find_with_regex(pat)), GeneratorType)
 
-class ProteinSequenceTests(TestCase):
+    def test_find_with_regex_string_as_input(self):
+        seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
+        pat = '(T+A)(CA)'
 
-    def setUp(self):
-        self.empty = ProteinSequence('')
-        self.p1 = ProteinSequence('GREG')
-        self.p2 = ProteinSequence(
-            'PRTEINSEQNCE', id="test-seq-2",
-            description="A test sequence")
-        self.p3 = ProteinSequence(
-            'PROTEIN', id="bad-seq-1",
-            description="Not a protein sequence")
-
-    def test_alphabet(self):
-        exp = {
-            'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N',
-            'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c',
-            'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm', 'n', 'p', 'q', 'r',
-            's', 't', 'v', 'w', 'x', 'y', 'z'
-        }
-
-        self.assertEqual(self.p1.alphabet(), exp)
-        self.assertEqual(ProteinSequence.alphabet(), exp)
-
-    def test_gap_alphabet(self):
-        self.assertEqual(self.p1.gap_alphabet(), set('-.'))
-
-    def test_iupac_standard_characters(self):
-        exp = set("ACDEFGHIKLMNPQRSTVWYacdefghiklmnpqrstvwy")
-        self.assertEqual(self.p1.iupac_standard_characters(), exp)
-        self.assertEqual(ProteinSequence.iupac_standard_characters(), exp)
-
-    def test_iupac_degeneracies(self):
-        exp = {
-            'B': set(['D', 'N']), 'Z': set(['E', 'Q']),
-            'X': set(['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M',
-                      'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']),
-            'b': set(['d', 'n']), 'z': set(['e', 'q']),
-            'x': set(['a', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm',
-                      'n', 'p', 'q', 'r', 's', 't', 'v', 'w', 'y']),
-        }
-        self.assertEqual(self.p1.iupac_degeneracies(), exp)
-        self.assertEqual(ProteinSequence.iupac_degeneracies(), exp)
-
-    def test_iupac_degenerate_characters(self):
-        exp = set(['B', 'X', 'Z', 'b', 'x', 'z'])
-        self.assertEqual(self.p1.iupac_degenerate_characters(), exp)
-        self.assertEqual(ProteinSequence.iupac_degenerate_characters(), exp)
-
-    def test_iupac_characters(self):
-        exp = {
-            'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N',
-            'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b',
-            'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm', 'n', 'p', 'q',
-            'r', 's', 't', 'v', 'w', 'x', 'y', 'z'
-        }
-        self.assertEqual(self.p1.iupac_characters(), exp)
-        self.assertEqual(ProteinSequence.iupac_characters(), exp)
-
-    def test_nondegenerates(self):
-        exp = [ProteinSequence('AD'), ProteinSequence('AN')]
-        # Sort based on sequence string, as order is not guaranteed.
-        obs = sorted(ProteinSequence('AB').nondegenerates(), key=str)
+        obs = list(seq.find_with_regex(pat))
+        exp = [slice(2, 5), slice(5, 7)]
         self.assertEqual(obs, exp)
 
+        self.assertIs(type(seq.find_with_regex(pat)), GeneratorType)
+
+    def test_find_with_regex_no_groups(self):
+        seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
+        pat = re.compile('(FOO)')
+        self.assertEqual(list(seq.find_with_regex(pat)), [])
+
+    def test_find_with_regex_ignore_no_difference(self):
+        seq = Sequence('..ABCDEFG..')
+        pat = "([A-Z]+)"
+        exp = [slice(2, 9)]
+        self.assertEqual(list(seq.find_with_regex(pat)), exp)
+
+        obs = seq.find_with_regex(
+            pat, ignore=np.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1],
+                                 dtype=bool))
+        self.assertEqual(list(obs), exp)
+
+    def test_find_with_regex_ignore(self):
+        obs = Sequence('A..A..BBAAB.A..AB..A.').find_with_regex(
+            "(A+)", ignore=np.array([0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1,
+                                     1, 0, 0, 1, 1, 0, 1], dtype=bool))
+
+        self.assertEqual(list(obs), [slice(0, 4), slice(8, 10), slice(12, 16),
+                                     slice(19, 20)])
+
+    def test_find_with_regex_ignore_index_array(self):
+        obs = Sequence('A..A..BBAAB.A..AB..A.').find_with_regex(
+            "(A+)", ignore=np.array([1, 2, 4, 5, 11, 13, 14, 17, 18, 20]))
+
+        self.assertEqual(list(obs), [slice(0, 4), slice(8, 10), slice(12, 16),
+                                     slice(19, 20)])
+
+    def test_iter_contiguous_index_array(self):
+        s = Sequence("0123456789abcdef")
+        for c in list, tuple, np.array, pd.Series:
+            exp = [Sequence("0123"), Sequence("89ab")]
+            obs = s.iter_contiguous(c([0, 1, 2, 3, 8, 9, 10, 11]))
+            self.assertEqual(list(obs), exp)
+
+    def test_iter_contiguous_boolean_vector(self):
+        s = Sequence("0123456789abcdef")
+        for c in list, tuple, np.array, pd.Series:
+            exp = [Sequence("0123"), Sequence("89ab")]
+            obs = s.iter_contiguous(c(([True] * 4 + [False] * 4) * 2))
+            self.assertEqual(list(obs), exp)
+
+    def test_iter_contiguous_iterable_slices(self):
+        def spaced_out():
+            yield slice(0, 4)
+            yield slice(8, 12)
+
+        def contiguous():
+            yield slice(0, 4)
+            yield slice(4, 8)
+            yield slice(12, 16)
+
+        s = Sequence("0123456789abcdef")
+        for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
+                  lambda x: pd.Series(tuple(x))):
+            exp = [Sequence("0123"), Sequence("89ab")]
+            obs = s.iter_contiguous(c(spaced_out()))
+            self.assertEqual(list(obs), exp)
+
+            exp = [Sequence("01234567"), Sequence("cdef")]
+            obs = s.iter_contiguous(c(contiguous()))
+            self.assertEqual(list(obs), exp)
+
+    def test_iter_contiguous_with_max_length(self):
+        s = Sequence("0123456789abcdef")
+        for c in list, tuple, np.array, pd.Series:
+            exp = [Sequence("234"), Sequence("678"), Sequence("abc")]
+            obs = s.iter_contiguous(c([True, False, True, True] * 4),
+                                    min_length=3)
+            self.assertEqual(list(obs), exp)
+
+            exp = [Sequence("0"), Sequence("234"), Sequence("678"),
+                   Sequence("abc"), Sequence("ef")]
+            obs1 = list(s.iter_contiguous(c([True, False, True, True] * 4),
+                                          min_length=1))
+
+            obs2 = list(s.iter_contiguous(c([True, False, True, True] * 4)))
+            self.assertEqual(obs1, obs2)
+            self.assertEqual(obs1, exp)
+
+    def test_iter_contiguous_with_invert(self):
+        def spaced_out():
+            yield slice(0, 4)
+            yield slice(8, 12)
+
+        def contiguous():
+            yield slice(0, 4)
+            yield slice(4, 8)
+            yield slice(12, 16)
+
+        s = Sequence("0123456789abcdef")
+        for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
+                  lambda x: pd.Series(tuple(x))):
+            exp = [Sequence("4567"), Sequence("cdef")]
+            obs = s.iter_contiguous(c(spaced_out()), invert=True)
+            self.assertEqual(list(obs), exp)
+
+            exp = [Sequence("89ab")]
+            obs = s.iter_contiguous(c(contiguous()), invert=True)
+            self.assertEqual(list(obs), exp)
+
+    def test_has_metadata(self):
+        # truly missing
+        seq = Sequence('ACGT')
+        self.assertFalse(seq.has_metadata())
+        # metadata attribute should be None and not initialized to a "missing"
+        # representation
+        self.assertIsNone(seq._metadata)
+
+        # looks empty
+        seq = Sequence('ACGT', metadata={})
+        self.assertFalse(seq.has_metadata())
+
+        # metadata is present
+        seq = Sequence('ACGT', metadata={'foo': 42})
+        self.assertTrue(seq.has_metadata())
+
+    def test_has_positional_metadata(self):
+        # truly missing
+        seq = Sequence('ACGT')
+        self.assertFalse(seq.has_positional_metadata())
+        # positional metadata attribute should be None and not initialized to a
+        # "missing" representation
+        self.assertIsNone(seq._positional_metadata)
+
+        # looks empty
+        seq = Sequence('ACGT',
+                       positional_metadata=pd.DataFrame(index=np.arange(4)))
+        self.assertFalse(seq.has_positional_metadata())
+
+        # positional metadata is present
+        seq = Sequence('ACGT', positional_metadata={'foo': [1, 2, 3, 4]})
+        self.assertTrue(seq.has_positional_metadata())
+
+    def test_copy_without_metadata(self):
+        # shallow vs deep copy with sequence only should be equivalent. thus,
+        # copy.copy, copy.deepcopy, and Sequence.copy(deep=True|False) should
+        # all be equivalent
+        for copy_method in (lambda seq: seq.copy(deep=False),
+                            lambda seq: seq.copy(deep=True),
+                            copy.copy, copy.deepcopy):
+            seq = Sequence('ACGT')
+            seq_copy = copy_method(seq)
+
+            self.assertEqual(seq_copy, seq)
+            self.assertIsNot(seq_copy, seq)
+            self.assertIsNot(seq_copy._bytes, seq._bytes)
+
+            # metadata attributes should be None and not initialized to a
+            # "missing" representation
+            self.assertIsNone(seq._metadata)
+            self.assertIsNone(seq._positional_metadata)
+            self.assertIsNone(seq_copy._metadata)
+            self.assertIsNone(seq_copy._positional_metadata)
+
+    def test_copy_with_metadata_shallow(self):
+        # copy.copy and Sequence.copy should behave identically
+        for copy_method in lambda seq: seq.copy(), copy.copy:
+            seq = Sequence('ACGT', metadata={'foo': [1]},
+                           positional_metadata={'bar': [[], [], [], []],
+                                                'baz': [42, 42, 42, 42]})
+            seq_copy = copy_method(seq)
+
+            self.assertEqual(seq_copy, seq)
+            self.assertIsNot(seq_copy, seq)
+            self.assertIsNot(seq_copy._bytes, seq._bytes)
+            self.assertIsNot(seq_copy._metadata, seq._metadata)
+            self.assertIsNot(seq_copy._positional_metadata,
+                             seq._positional_metadata)
+            self.assertIsNot(seq_copy._positional_metadata.values,
+                             seq._positional_metadata.values)
+            self.assertIs(seq_copy._metadata['foo'], seq._metadata['foo'])
+            self.assertIs(seq_copy._positional_metadata.loc[0, 'bar'],
+                          seq._positional_metadata.loc[0, 'bar'])
+
+            seq_copy.metadata['foo'].append(2)
+            seq_copy.metadata['foo2'] = 42
+
+            self.assertEqual(seq_copy.metadata, {'foo': [1, 2], 'foo2': 42})
+            self.assertEqual(seq.metadata, {'foo': [1, 2]})
+
+            seq_copy.positional_metadata.loc[0, 'bar'].append(1)
+            seq_copy.positional_metadata.loc[0, 'baz'] = 43
+
+            assert_data_frame_almost_equal(
+                seq_copy.positional_metadata,
+                pd.DataFrame({'bar': [[1], [], [], []],
+                              'baz': [43, 42, 42, 42]}))
+            assert_data_frame_almost_equal(
+                seq.positional_metadata,
+                pd.DataFrame({'bar': [[1], [], [], []],
+                              'baz': [42, 42, 42, 42]}))
+
+    def test_copy_with_metadata_deep(self):
+        # copy.deepcopy and Sequence.copy(deep=True) should behave identically
+        for copy_method in lambda seq: seq.copy(deep=True), copy.deepcopy:
+            seq = Sequence('ACGT', metadata={'foo': [1]},
+                           positional_metadata={'bar': [[], [], [], []],
+                                                'baz': [42, 42, 42, 42]})
+            seq_copy = copy_method(seq)
+
+            self.assertEqual(seq_copy, seq)
+            self.assertIsNot(seq_copy, seq)
+            self.assertIsNot(seq_copy._bytes, seq._bytes)
+            self.assertIsNot(seq_copy._metadata, seq._metadata)
+            self.assertIsNot(seq_copy._positional_metadata,
+                             seq._positional_metadata)
+            self.assertIsNot(seq_copy._positional_metadata.values,
+                             seq._positional_metadata.values)
+            self.assertIsNot(seq_copy._metadata['foo'], seq._metadata['foo'])
+            self.assertIsNot(seq_copy._positional_metadata.loc[0, 'bar'],
+                             seq._positional_metadata.loc[0, 'bar'])
+
+            seq_copy.metadata['foo'].append(2)
+            seq_copy.metadata['foo2'] = 42
+
+            self.assertEqual(seq_copy.metadata, {'foo': [1, 2], 'foo2': 42})
+            self.assertEqual(seq.metadata, {'foo': [1]})
+
+            seq_copy.positional_metadata.loc[0, 'bar'].append(1)
+            seq_copy.positional_metadata.loc[0, 'baz'] = 43
+
+            assert_data_frame_almost_equal(
+                seq_copy.positional_metadata,
+                pd.DataFrame({'bar': [[1], [], [], []],
+                              'baz': [43, 42, 42, 42]}))
+            assert_data_frame_almost_equal(
+                seq.positional_metadata,
+                pd.DataFrame({'bar': [[], [], [], []],
+                              'baz': [42, 42, 42, 42]}))
+
+    def test_deepcopy_memo_is_respected(self):
+        # basic test to ensure deepcopy's memo is passed through to recursive
+        # deepcopy calls
+        seq = Sequence('ACGT', metadata={'foo': 'bar'})
+        memo = {}
+        copy.deepcopy(seq, memo)
+        self.assertGreater(len(memo), 2)
+
+    def test_munge_to_index_array_valid_index_array(self):
+        s = Sequence('123456')
+
+        for c in list, tuple, np.array, pd.Series:
+            exp = np.array([1, 2, 3], dtype=int)
+            obs = s._munge_to_index_array(c([1, 2, 3]))
+            npt.assert_equal(obs, exp)
+
+            exp = np.array([1, 3, 5], dtype=int)
+            obs = s._munge_to_index_array(c([1, 3, 5]))
+            npt.assert_equal(obs, exp)
+
+    def test_munge_to_index_array_invalid_index_array(self):
+        s = Sequence("12345678")
+        for c in list, tuple, np.array, pd.Series:
+            with self.assertRaises(ValueError):
+                s._munge_to_index_array(c([3, 2, 1]))
+
+            with self.assertRaises(ValueError):
+                s._munge_to_index_array(c([5, 6, 7, 2]))
+
+            with self.assertRaises(ValueError):
+                s._munge_to_index_array(c([0, 1, 2, 1]))
+
+    def test_munge_to_index_array_valid_bool_array(self):
+        s = Sequence('123456')
+
+        for c in list, tuple, np.array, pd.Series:
+            exp = np.array([2, 3, 5], dtype=int)
+            obs = s._munge_to_index_array(
+                c([False, False, True, True, False, True]))
+            npt.assert_equal(obs, exp)
+
+            exp = np.array([], dtype=int)
+            obs = s._munge_to_index_array(
+                c([False] * 6))
+            npt.assert_equal(obs, exp)
+
+            exp = np.arange(6)
+            obs = s._munge_to_index_array(
+                c([True] * 6))
+            npt.assert_equal(obs, exp)
+
+    def test_munge_to_index_array_invalid_bool_array(self):
+        s = Sequence('123456')
+
+        for c in (list, tuple, lambda x: np.array(x, dtype=bool),
+                  lambda x: pd.Series(x, dtype=bool)):
+
+            with self.assertRaises(ValueError):
+                s._munge_to_index_array(c([]))
+
+            with self.assertRaises(ValueError):
+                s._munge_to_index_array(c([True]))
+
+            with self.assertRaises(ValueError):
+                s._munge_to_index_array(c([True] * 10))
+
+    def test_munge_to_index_array_valid_iterable(self):
+        s = Sequence('')
+
+        def slices_only():
+            return (slice(i, i+1) for i in range(0, 10, 2))
+
+        def mixed():
+            return (slice(i, i+1) if i % 2 == 0 else i for i in range(10))
+
+        def unthinkable():
+            for i in range(10):
+                if i % 3 == 0:
+                    yield slice(i, i+1)
+                elif i % 3 == 1:
+                    yield i
+                else:
+                    yield np.array([i], dtype=int)
+        for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
+                  lambda x: pd.Series(tuple(x))):
+            exp = np.arange(10, dtype=int)
+            obs = s._munge_to_index_array(c(mixed()))
+            npt.assert_equal(obs, exp)
+
+            exp = np.arange(10, dtype=int)
+            obs = s._munge_to_index_array(c(unthinkable()))
+            npt.assert_equal(obs, exp)
+
+            exp = np.arange(10, step=2, dtype=int)
+            obs = s._munge_to_index_array(c(slices_only()))
+            npt.assert_equal(obs, exp)
+
+    def test_munge_to_index_array_invalid_iterable(self):
+        s = Sequence('')
+
+        def bad1():
+            yield "r"
+            yield [1, 2, 3]
+
+        def bad2():
+            yield 1
+            yield 'str'
+
+        def bad3():
+            yield False
+            yield True
+            yield 2
+
+        def bad4():
+            yield np.array([False, True])
+            yield slice(2, 5)
+
+        for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
+                  lambda x: pd.Series(tuple(x))):
+
+            with self.assertRaises(TypeError):
+                s._munge_to_index_array(bad1())
+
+            with self.assertRaises(TypeError):
+                s._munge_to_index_array(bad2())
+
+            with self.assertRaises(TypeError):
+                s._munge_to_index_array(bad3())
+
+            with self.assertRaises(TypeError):
+                s._munge_to_index_array(bad4())
+
+    def test_munge_to_index_array_valid_string(self):
+        seq = Sequence('ACGTACGT',
+                       positional_metadata={'introns': [False, True, True,
+                                                        False, False, True,
+                                                        False, False]})
+        npt.assert_equal(np.array([1, 2, 5]),
+                         seq._munge_to_index_array('introns'))
+
+        seq.positional_metadata['exons'] = ~seq.positional_metadata['introns']
+        npt.assert_equal(np.array([0, 3, 4, 6, 7]),
+                         seq._munge_to_index_array('exons'))
+
+    def test_munge_to_index_array_invalid_string(self):
+        seq_str = 'ACGT'
+        seq = Sequence(seq_str,
+                       positional_metadata={'quality': range(len(seq_str))})
+
+        with six.assertRaisesRegex(self, ValueError,
+                                   "No positional metadata associated with "
+                                   "key 'introns'"):
+            seq._munge_to_index_array('introns')
+
+        with six.assertRaisesRegex(self, TypeError,
+                                   "Column 'quality' in positional metadata "
+                                   "does not correspond to a boolean "
+                                   "vector"):
+            seq._munge_to_index_array('quality')
+
+    def test_munge_to_bytestring_return_bytes(self):
+        seq = Sequence('')
+        m = 'dummy_method'
+        str_inputs = ('', 'a', 'acgt')
+        unicode_inputs = (u'', u'a', u'acgt')
+        byte_inputs = (b'', b'a', b'acgt')
+        seq_inputs = (Sequence(''), Sequence('a'), Sequence('acgt'))
+        all_inputs = str_inputs + unicode_inputs + byte_inputs + seq_inputs
+        all_expected = [b'', b'a', b'acgt'] * 4
+
+        for input_, expected in zip(all_inputs, all_expected):
+            observed = seq._munge_to_bytestring(input_, m)
+            self.assertEqual(observed, expected)
+            self.assertIs(type(observed), bytes)
+
+    def test_munge_to_bytestring_unicode_out_of_ascii_range(self):
+        seq = Sequence('')
+        all_inputs = (u'\x80', u'abc\x80', u'\x80abc')
+        for input_ in all_inputs:
+            with six.assertRaisesRegex(self, UnicodeEncodeError,
+                                       "'ascii' codec can't encode character"
+                                       ".*in position.*: ordinal not in"
+                                       " range\(128\)"):
+                seq._munge_to_bytestring(input_, 'dummy_method')
+
+
+# NOTE: this must be a *separate* class for doctests only (no unit tests). nose
+# will not run the unit tests otherwise
+#
+# these doctests exercise the correct formatting of Sequence's repr in a
+# variety of situations. they are more extensive than the unit tests above
+# (TestSequence.test_repr) but are only currently run in py2. thus, they cannot
+# be relied upon for coverage (the unit tests take care of this)
+class SequenceReprDoctests(object):
+    r"""
+    >>> from skbio import Sequence
+
+    Empty (minimal) sequence:
+
+    >>> Sequence('')
+    Sequence
+    -------------
+    Stats:
+        length: 0
+    -------------
+
+    Single character sequence:
+
+    >>> Sequence('G')
+    Sequence
+    -------------
+    Stats:
+        length: 1
+    -------------
+    0 G
+
+    Multicharacter sequence:
+
+    >>> Sequence('ACGT')
+    Sequence
+    -------------
+    Stats:
+        length: 4
+    -------------
+    0 ACGT
+
+    Full single line:
+
+    >>> Sequence('A' * 60)
+    Sequence
+    -------------------------------------------------------------------
+    Stats:
+        length: 60
+    -------------------------------------------------------------------
+    0 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+
+    Full single line with 1 character overflow:
+
+    >>> Sequence('A' * 61)
+    Sequence
+    --------------------------------------------------------------------
+    Stats:
+        length: 61
+    --------------------------------------------------------------------
+    0  AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+    60 A
+
+    Two full lines:
+
+    >>> Sequence('T' * 120)
+    Sequence
+    --------------------------------------------------------------------
+    Stats:
+        length: 120
+    --------------------------------------------------------------------
+    0  TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT
+    60 TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT
+
+    Two full lines with 1 character overflow:
+
+    >>> Sequence('T' * 121)
+    Sequence
+    ---------------------------------------------------------------------
+    Stats:
+        length: 121
+    ---------------------------------------------------------------------
+    0   TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT
+    60  TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT
+    120 T
+
+    Five full lines (maximum amount of information):
+
+    >>> Sequence('A' * 300)
+    Sequence
+    ---------------------------------------------------------------------
+    Stats:
+        length: 300
+    ---------------------------------------------------------------------
+    0   AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+    60  AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+    120 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+    180 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+    240 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+
+    Six lines starts "summarized" output:
+
+    >>> Sequence('A' * 301)
+    Sequence
+    ---------------------------------------------------------------------
+    Stats:
+        length: 301
+    ---------------------------------------------------------------------
+    0   AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+    60  AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+    ...
+    240 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+    300 A
+
+    A naive algorithm would assume the width of the first column (noting
+    position) based on the sequence's length alone. This can be off by one if
+    the last position (in the last line) has a shorter width than the width
+    calculated from the sequence's length. This test case ensures that only a
+    single space is inserted between position 99960 and the first sequence
+    chunk:
+
+    >>> Sequence('A' * 100000)
+    Sequence
+    -----------------------------------------------------------------------
+    Stats:
+        length: 100000
+    -----------------------------------------------------------------------
+    0     AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+    60    AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+    ...
+    99900 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+    99960 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+
+    The largest sequence that can be displayed using six chunks per line:
+
+    >>> Sequence('A' * 100020)
+    Sequence
+    -----------------------------------------------------------------------
+    Stats:
+        length: 100020
+    -----------------------------------------------------------------------
+    0     AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+    60    AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+    ...
+    99900 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+    99960 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+
+    A single character longer than the previous sequence causes the optimal
+    number of chunks per line to be 5:
+
+    >>> Sequence('A' * 100021)
+    Sequence
+    -------------------------------------------------------------
+    Stats:
+        length: 100021
+    -------------------------------------------------------------
+    0      AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+    50     AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+    ...
+    99950  AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
+    100000 AAAAAAAAAA AAAAAAAAAA A
+
+    Wide range of characters (locale-independent):
+
+    >>> import string
+    >>> Sequence((string.ascii_letters + string.punctuation + string.digits +
+    ...          'a space') * 567)
+    Sequence
+    -----------------------------------------------------------------------
+    Stats:
+        length: 57267
+    -----------------------------------------------------------------------
+    0     abcdefghij klmnopqrst uvwxyzABCD EFGHIJKLMN OPQRSTUVWX YZ!"#$%&'(
+    60    )*+,-./:;< =>?@[\]^_` {|}~012345 6789a spac eabcdefghi jklmnopqrs
+    ...
+    57180 opqrstuvwx yzABCDEFGH IJKLMNOPQR STUVWXYZ!" #$%&'()*+, -./:;<=>?@
+    57240 [\]^_`{|}~ 0123456789 a space
+
+    Supply horrendous metadata and positional metadata to exercise a variety of
+    metadata formatting cases and rules. Sorting should be by type, then by
+    value within each type (Python 3 doesn't allow sorting of mixed types):
+
+    >>> metadata = {
+    ...     # str key, str value
+    ...     'abc': 'some description',
+    ...     # int value
+    ...     'foo': 42,
+    ...     # unsupported type (dict) value
+    ...     'bar': {},
+    ...     # int key, wrapped str (single line)
+    ...     42: 'some words to test text wrapping and such... yada yada yada '
+    ...         'yada yada yada yada yada.',
+    ...     # bool key, wrapped str (multi-line)
+    ...     True: 'abc ' * 34,
+    ...     # float key, truncated str (too long)
+    ...     42.5: 'abc ' * 200,
+    ...     # unsupported type (tuple) key, unsupported type (list) value
+    ...     ('foo', 'bar'): [1, 2, 3],
+    ...     # unicode key, single long word that wraps
+    ...     u'long word': 'abc' * 30,
+    ...     # truncated key (too long), None value
+    ...     'too long of a key name to display in repr': None,
+    ...     # wrapped unicode value (has u'' prefix)
+    ...     'unicode wrapped value': u'abcd' * 25,
+    ...     # float value
+    ...     0.1: 99.9999,
+    ...     # bool value
+    ...     43: False,
+    ...     # None key, complex value
+    ...     None: complex(-1.0, 0.0),
+    ...     # nested quotes
+    ...     10: '"\''}
+    ... }
+    >>> positional_metadata = {
+    ...     # str key, int list value
+    ...     'foo': [1, 2, 3, 4],
+    ...     # float key, float list value
+    ...     42.5: [2.5, 3.0, 4.2, -0.00001],
+    ...     # int key, object list value
+    ...     42: [[], 4, 5, {}],
+    ...     # truncated key (too long), bool list value
+    ...     'abc' * 90: [True, False, False, True],
+    ...     # None key
+    ...     None: range(4)}
+    >>> Sequence('ACGT', metadata=metadata,
+    ...          positional_metadata=positional_metadata)
+    Sequence
+    -----------------------------------------------------------------------
+    Metadata:
+        None: (-1+0j)
+        True: 'abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc
+               abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc
+               abc abc abc abc '
+        0.1: 99.9999
+        42.5: <type 'str'>
+        10: '"\''
+        42: 'some words to test text wrapping and such... yada yada yada
+             yada yada yada yada yada.'
+        43: False
+        'abc': 'some description'
+        'bar': <type 'dict'>
+        'foo': 42
+        <type 'str'>: None
+        'unicode wrapped value': u'abcdabcdabcdabcdabcdabcdabcdabcdabcdabcd
+                                   abcdabcdabcdabcdabcdabcdabcdabcdabcdabcd
+                                   abcdabcdabcdabcdabcd'
+        <type 'tuple'>: <type 'list'>
+        u'long word': 'abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabca
+                       bcabcabcabcabcabcabcabcabcabcabcabcabc'
+    Positional metadata:
+        None: <dtype: int64>
+        42: <dtype: object>
+        42.5: <dtype: float64>
+        <type 'str'>: <dtype: bool>
+        'foo': <dtype: int64>
+    Stats:
+        length: 4
+    -----------------------------------------------------------------------
+    0 ACGT
+    """
+    pass
+
+
 if __name__ == "__main__":
     main()
diff --git a/skbio/stats/__init__.py b/skbio/stats/__init__.py
index c24b3ef..83fb67c 100644
--- a/skbio/stats/__init__.py
+++ b/skbio/stats/__init__.py
@@ -14,10 +14,12 @@ Subpackages
    :toctree: generated/
 
    distance
+   evolve
    ordination
    spatial
    gradient
    power
+   composition
 
 Functions
 ---------
@@ -25,12 +27,11 @@ Functions
 .. autosummary::
    :toctree: generated/
 
-   p_value_to_str
-   subsample
    subsample_counts
    isubsample
 
 """
+
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -39,11 +40,12 @@ Functions
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from numpy.testing import Tester
+from __future__ import absolute_import, division, print_function
+
+from skbio.util import TestRunner
 
-from ._misc import p_value_to_str
-from ._subsample import subsample, subsample_counts, isubsample
+from ._subsample import subsample_counts, isubsample
 
-__all__ = ['p_value_to_str', 'subsample', 'subsample_counts', 'isubsample']
+__all__ = ['subsample_counts', 'isubsample']
 
-test = Tester().test
+test = TestRunner(__file__).test
diff --git a/skbio/stats/__subsample.c b/skbio/stats/__subsample.c
index cdf9b0f..261692c 100644
--- a/skbio/stats/__subsample.c
+++ b/skbio/stats/__subsample.c
@@ -1,4 +1,12 @@
-/* Generated by Cython 0.20.2 on Wed Nov 19 10:53:19 2014 */
+/* Generated by Cython 0.22 */
+
+/* BEGIN: Cython Metadata
+{
+    "distutils": {
+        "depends": []
+    }
+}
+END: Cython Metadata */
 
 #define PY_SSIZE_T_CLEAN
 #ifndef CYTHON_USE_PYLONG_INTERNALS
@@ -16,11 +24,11 @@
 #include "Python.h"
 #ifndef Py_PYTHON_H
     #error Python headers needed to compile C extensions, please install development version of Python.
-#elif PY_VERSION_HEX < 0x02040000
-    #error Cython requires Python 2.4+.
+#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000)
+    #error Cython requires Python 2.6+ or Python 3.2+.
 #else
-#define CYTHON_ABI "0_20_2"
-#include <stddef.h> /* For offsetof */
+#define CYTHON_ABI "0_22"
+#include <stddef.h>
 #ifndef offsetof
 #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
 #endif
@@ -54,65 +62,11 @@
 #define CYTHON_COMPILING_IN_PYPY 0
 #define CYTHON_COMPILING_IN_CPYTHON 1
 #endif
-#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600
+#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
 #define Py_OptimizeFlag 0
 #endif
-#if PY_VERSION_HEX < 0x02050000
-  typedef int Py_ssize_t;
-  #define PY_SSIZE_T_MAX INT_MAX
-  #define PY_SSIZE_T_MIN INT_MIN
-  #define PY_FORMAT_SIZE_T ""
-  #define CYTHON_FORMAT_SSIZE_T ""
-  #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
-  #define PyInt_AsSsize_t(o)   __Pyx_PyInt_As_int(o)
-  #define PyNumber_Index(o)    ((PyNumber_Check(o) && !PyFloat_Check(o)) ? PyNumber_Int(o) : \
-                                (PyErr_Format(PyExc_TypeError, \
-                                              "expected index value, got %.200s", Py_TYPE(o)->tp_name), \
-                                 (PyObject*)0))
-  #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \
-                                  !PyComplex_Check(o))
-  #define PyIndex_Check __Pyx_PyIndex_Check
-  #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message)
-  #define __PYX_BUILD_PY_SSIZE_T "i"
-#else
-  #define __PYX_BUILD_PY_SSIZE_T "n"
-  #define CYTHON_FORMAT_SSIZE_T "z"
-  #define __Pyx_PyIndex_Check PyIndex_Check
-#endif
-#if PY_VERSION_HEX < 0x02060000
-  #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)
-  #define Py_TYPE(ob)   (((PyObject*)(ob))->ob_type)
-  #define Py_SIZE(ob)   (((PyVarObject*)(ob))->ob_size)
-  #define PyVarObject_HEAD_INIT(type, size) \
-          PyObject_HEAD_INIT(type) size,
-  #define PyType_Modified(t)
-  typedef struct {
-     void *buf;
-     PyObject *obj;
-     Py_ssize_t len;
-     Py_ssize_t itemsize;
-     int readonly;
-     int ndim;
-     char *format;
-     Py_ssize_t *shape;
-     Py_ssize_t *strides;
-     Py_ssize_t *suboffsets;
-     void *internal;
-  } Py_buffer;
-  #define PyBUF_SIMPLE 0
-  #define PyBUF_WRITABLE 0x0001
-  #define PyBUF_FORMAT 0x0004
-  #define PyBUF_ND 0x0008
-  #define PyBUF_STRIDES (0x0010 | PyBUF_ND)
-  #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES)
-  #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES)
-  #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES)
-  #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES)
-  #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE)
-  #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE)
-  typedef int (*getbufferproc)(PyObject *, Py_buffer *, int);
-  typedef void (*releasebufferproc)(PyObject *, Py_buffer *);
-#endif
+#define __PYX_BUILD_PY_SSIZE_T "n"
+#define CYTHON_FORMAT_SSIZE_T "z"
 #if PY_MAJOR_VERSION < 3
   #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
   #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
@@ -124,22 +78,11 @@
           PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
   #define __Pyx_DefaultClassType PyType_Type
 #endif
-#if PY_VERSION_HEX < 0x02060000
-  #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict")
-#endif
 #if PY_MAJOR_VERSION >= 3
   #define Py_TPFLAGS_CHECKTYPES 0
   #define Py_TPFLAGS_HAVE_INDEX 0
-#endif
-#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3)
   #define Py_TPFLAGS_HAVE_NEWBUFFER 0
 #endif
-#if PY_VERSION_HEX < 0x02060000
-  #define Py_TPFLAGS_HAVE_VERSION_TAG 0
-#endif
-#if PY_VERSION_HEX < 0x02060000 && !defined(Py_TPFLAGS_IS_ABSTRACT)
-  #define Py_TPFLAGS_IS_ABSTRACT 0
-#endif
 #if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE)
   #define Py_TPFLAGS_HAVE_FINALIZE 0
 #endif
@@ -164,12 +107,14 @@
 #if CYTHON_COMPILING_IN_PYPY
   #define __Pyx_PyUnicode_Concat(a, b)      PyNumber_Add(a, b)
   #define __Pyx_PyUnicode_ConcatSafe(a, b)  PyNumber_Add(a, b)
+  #define __Pyx_PyFrozenSet_Size(s)         PyObject_Size(s)
 #else
   #define __Pyx_PyUnicode_Concat(a, b)      PyUnicode_Concat(a, b)
   #define __Pyx_PyUnicode_ConcatSafe(a, b)  ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \
       PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
+  #define __Pyx_PyFrozenSet_Size(s)         PySet_Size(s)
 #endif
-#define __Pyx_PyString_FormatSafe(a, b)  ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
+#define __Pyx_PyString_FormatSafe(a, b)   ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
 #define __Pyx_PyUnicode_FormatSafe(a, b)  ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
 #if PY_MAJOR_VERSION >= 3
   #define __Pyx_PyString_Format(a, b)  PyUnicode_Format(a, b)
@@ -183,36 +128,13 @@
   #define PyString_Check               PyUnicode_Check
   #define PyString_CheckExact          PyUnicode_CheckExact
 #endif
-#if PY_VERSION_HEX < 0x02060000
-  #define PyBytesObject                PyStringObject
-  #define PyBytes_Type                 PyString_Type
-  #define PyBytes_Check                PyString_Check
-  #define PyBytes_CheckExact           PyString_CheckExact
-  #define PyBytes_FromString           PyString_FromString
-  #define PyBytes_FromStringAndSize    PyString_FromStringAndSize
-  #define PyBytes_FromFormat           PyString_FromFormat
-  #define PyBytes_DecodeEscape         PyString_DecodeEscape
-  #define PyBytes_AsString             PyString_AsString
-  #define PyBytes_AsStringAndSize      PyString_AsStringAndSize
-  #define PyBytes_Size                 PyString_Size
-  #define PyBytes_AS_STRING            PyString_AS_STRING
-  #define PyBytes_GET_SIZE             PyString_GET_SIZE
-  #define PyBytes_Repr                 PyString_Repr
-  #define PyBytes_Concat               PyString_Concat
-  #define PyBytes_ConcatAndDel         PyString_ConcatAndDel
-#endif
 #if PY_MAJOR_VERSION >= 3
   #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
   #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
 #else
-  #define __Pyx_PyBaseString_Check(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj) || \
-                                         PyString_Check(obj) || PyUnicode_Check(obj))
+  #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
   #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
 #endif
-#if PY_VERSION_HEX < 0x02060000
-  #define PySet_Check(obj)             PyObject_TypeCheck(obj, &PySet_Type)
-  #define PyFrozenSet_Check(obj)       PyObject_TypeCheck(obj, &PyFrozenSet_Type)
-#endif
 #ifndef PySet_CheckExact
   #define PySet_CheckExact(obj)        (Py_TYPE(obj) == &PySet_Type)
 #endif
@@ -237,6 +159,11 @@
 #if PY_MAJOR_VERSION >= 3
   #define PyBoolObject                 PyLongObject
 #endif
+#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
+  #ifndef PyUnicode_InternFromString
+    #define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
+  #endif
+#endif
 #if PY_VERSION_HEX < 0x030200A4
   typedef long Py_hash_t;
   #define __Pyx_PyInt_FromHash_t PyInt_FromLong
@@ -245,42 +172,10 @@
   #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
   #define __Pyx_PyInt_AsHash_t   PyInt_AsSsize_t
 #endif
-#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300)
-  #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b)
-  #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value)
-  #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b)
-#else
-  #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \
-        (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \
-        (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \
-            (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0)))
-  #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \
-        (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
-        (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \
-            (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1)))
-  #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \
-        (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
-        (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \
-            (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1)))
-#endif
 #if PY_MAJOR_VERSION >= 3
-  #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
-#endif
-#if PY_VERSION_HEX < 0x02050000
-  #define __Pyx_GetAttrString(o,n)   PyObject_GetAttrString((o),((char *)(n)))
-  #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a))
-  #define __Pyx_DelAttrString(o,n)   PyObject_DelAttrString((o),((char *)(n)))
-#else
-  #define __Pyx_GetAttrString(o,n)   PyObject_GetAttrString((o),(n))
-  #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a))
-  #define __Pyx_DelAttrString(o,n)   PyObject_DelAttrString((o),(n))
-#endif
-#if PY_VERSION_HEX < 0x02050000
-  #define __Pyx_NAMESTR(n) ((char *)(n))
-  #define __Pyx_DOCSTR(n)  ((char *)(n))
+  #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
 #else
-  #define __Pyx_NAMESTR(n) (n)
-  #define __Pyx_DOCSTR(n)  (n)
+  #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
 #endif
 #ifndef CYTHON_INLINE
   #if defined(__GNUC__)
@@ -316,11 +211,22 @@ static CYTHON_INLINE float __PYX_NAN() {
   return value;
 }
 #endif
+#define __Pyx_void_to_None(void_result) (void_result, Py_INCREF(Py_None), Py_None)
 #ifdef __cplusplus
 template<typename T>
 void __Pyx_call_destructor(T* x) {
     x->~T();
 }
+template<typename T>
+class __Pyx_FakeReference {
+  public:
+    __Pyx_FakeReference() : ptr(NULL) { }
+    __Pyx_FakeReference(T& ref) : ptr(&ref) { }
+    T *operator->() { return ptr; }
+    operator T&() { return *ptr; }
+  private:
+    T *ptr;
+};
 #endif
 
 
@@ -373,7 +279,7 @@ void __Pyx_call_destructor(T* x) {
 # endif
 #endif
 typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding;
-                const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
+                const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
 
 #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
@@ -406,11 +312,11 @@ static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
 #endif
 #define __Pyx_PyObject_AsSString(s)    ((signed char*) __Pyx_PyObject_AsString(s))
 #define __Pyx_PyObject_AsUString(s)    ((unsigned char*) __Pyx_PyObject_AsString(s))
-#define __Pyx_PyObject_FromUString(s)  __Pyx_PyObject_FromString((const char*)s)
-#define __Pyx_PyBytes_FromUString(s)   __Pyx_PyBytes_FromString((const char*)s)
-#define __Pyx_PyByteArray_FromUString(s)   __Pyx_PyByteArray_FromString((const char*)s)
-#define __Pyx_PyStr_FromUString(s)     __Pyx_PyStr_FromString((const char*)s)
-#define __Pyx_PyUnicode_FromUString(s) __Pyx_PyUnicode_FromString((const char*)s)
+#define __Pyx_PyObject_FromCString(s)  __Pyx_PyObject_FromString((const char*)s)
+#define __Pyx_PyBytes_FromCString(s)   __Pyx_PyBytes_FromString((const char*)s)
+#define __Pyx_PyByteArray_FromCString(s)   __Pyx_PyByteArray_FromString((const char*)s)
+#define __Pyx_PyStr_FromCString(s)     __Pyx_PyStr_FromString((const char*)s)
+#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
 #if PY_MAJOR_VERSION < 3
 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
 {
@@ -446,7 +352,7 @@ static int __Pyx_init_sys_getdefaultencoding_params(void) {
     const char* default_encoding_c;
     sys = PyImport_ImportModule("sys");
     if (!sys) goto bad;
-    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
+    default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
     Py_DECREF(sys);
     if (!default_encoding) goto bad;
     default_encoding_c = PyBytes_AsString(default_encoding);
@@ -554,7 +460,7 @@ static const char *__pyx_filename;
 
 
 static const char *__pyx_f[] = {
-  "__subsample.pyx",
+  "skbio/stats/__subsample.pyx",
   "__init__.pxd",
   "type.pxd",
 };
@@ -562,12 +468,12 @@ static const char *__pyx_f[] = {
 struct __Pyx_StructField_;
 #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
 typedef struct {
-  const char* name; /* for error messages only */
+  const char* name;
   struct __Pyx_StructField_* fields;
-  size_t size;     /* sizeof(type) */
-  size_t arraysize[8]; /* length of array in each dimension */
+  size_t size;
+  size_t arraysize[8];
   int ndim;
-  char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject, c_H_ar */
+  char typegroup;
   char is_unsigned;
   int flags;
 } __Pyx_TypeInfo;
@@ -594,7 +500,7 @@ typedef struct {
 } __Pyx_BufFmt_Context;
 
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":723
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726
  * # in Cython to enable them only on the right systems.
  * 
  * ctypedef npy_int8       int8_t             # <<<<<<<<<<<<<<
@@ -603,7 +509,7 @@ typedef struct {
  */
 typedef npy_int8 __pyx_t_5numpy_int8_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":724
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":727
  * 
  * ctypedef npy_int8       int8_t
  * ctypedef npy_int16      int16_t             # <<<<<<<<<<<<<<
@@ -612,7 +518,7 @@ typedef npy_int8 __pyx_t_5numpy_int8_t;
  */
 typedef npy_int16 __pyx_t_5numpy_int16_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":725
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":728
  * ctypedef npy_int8       int8_t
  * ctypedef npy_int16      int16_t
  * ctypedef npy_int32      int32_t             # <<<<<<<<<<<<<<
@@ -621,7 +527,7 @@ typedef npy_int16 __pyx_t_5numpy_int16_t;
  */
 typedef npy_int32 __pyx_t_5numpy_int32_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":729
  * ctypedef npy_int16      int16_t
  * ctypedef npy_int32      int32_t
  * ctypedef npy_int64      int64_t             # <<<<<<<<<<<<<<
@@ -630,7 +536,7 @@ typedef npy_int32 __pyx_t_5numpy_int32_t;
  */
 typedef npy_int64 __pyx_t_5numpy_int64_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":730
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733
  * #ctypedef npy_int128     int128_t
  * 
  * ctypedef npy_uint8      uint8_t             # <<<<<<<<<<<<<<
@@ -639,7 +545,7 @@ typedef npy_int64 __pyx_t_5numpy_int64_t;
  */
 typedef npy_uint8 __pyx_t_5numpy_uint8_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":731
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":734
  * 
  * ctypedef npy_uint8      uint8_t
  * ctypedef npy_uint16     uint16_t             # <<<<<<<<<<<<<<
@@ -648,7 +554,7 @@ typedef npy_uint8 __pyx_t_5numpy_uint8_t;
  */
 typedef npy_uint16 __pyx_t_5numpy_uint16_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":732
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":735
  * ctypedef npy_uint8      uint8_t
  * ctypedef npy_uint16     uint16_t
  * ctypedef npy_uint32     uint32_t             # <<<<<<<<<<<<<<
@@ -657,7 +563,7 @@ typedef npy_uint16 __pyx_t_5numpy_uint16_t;
  */
 typedef npy_uint32 __pyx_t_5numpy_uint32_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":736
  * ctypedef npy_uint16     uint16_t
  * ctypedef npy_uint32     uint32_t
  * ctypedef npy_uint64     uint64_t             # <<<<<<<<<<<<<<
@@ -666,7 +572,7 @@ typedef npy_uint32 __pyx_t_5numpy_uint32_t;
  */
 typedef npy_uint64 __pyx_t_5numpy_uint64_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":737
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":740
  * #ctypedef npy_uint128    uint128_t
  * 
  * ctypedef npy_float32    float32_t             # <<<<<<<<<<<<<<
@@ -675,7 +581,7 @@ typedef npy_uint64 __pyx_t_5numpy_uint64_t;
  */
 typedef npy_float32 __pyx_t_5numpy_float32_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":738
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":741
  * 
  * ctypedef npy_float32    float32_t
  * ctypedef npy_float64    float64_t             # <<<<<<<<<<<<<<
@@ -684,7 +590,7 @@ typedef npy_float32 __pyx_t_5numpy_float32_t;
  */
 typedef npy_float64 __pyx_t_5numpy_float64_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":747
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":750
  * # The int types are mapped a bit surprising --
  * # numpy.int corresponds to 'l' and numpy.long to 'q'
  * ctypedef npy_long       int_t             # <<<<<<<<<<<<<<
@@ -693,7 +599,7 @@ typedef npy_float64 __pyx_t_5numpy_float64_t;
  */
 typedef npy_long __pyx_t_5numpy_int_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":748
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751
  * # numpy.int corresponds to 'l' and numpy.long to 'q'
  * ctypedef npy_long       int_t
  * ctypedef npy_longlong   long_t             # <<<<<<<<<<<<<<
@@ -702,7 +608,7 @@ typedef npy_long __pyx_t_5numpy_int_t;
  */
 typedef npy_longlong __pyx_t_5numpy_long_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":749
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":752
  * ctypedef npy_long       int_t
  * ctypedef npy_longlong   long_t
  * ctypedef npy_longlong   longlong_t             # <<<<<<<<<<<<<<
@@ -711,7 +617,7 @@ typedef npy_longlong __pyx_t_5numpy_long_t;
  */
 typedef npy_longlong __pyx_t_5numpy_longlong_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":754
  * ctypedef npy_longlong   longlong_t
  * 
  * ctypedef npy_ulong      uint_t             # <<<<<<<<<<<<<<
@@ -720,7 +626,7 @@ typedef npy_longlong __pyx_t_5numpy_longlong_t;
  */
 typedef npy_ulong __pyx_t_5numpy_uint_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":752
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755
  * 
  * ctypedef npy_ulong      uint_t
  * ctypedef npy_ulonglong  ulong_t             # <<<<<<<<<<<<<<
@@ -729,7 +635,7 @@ typedef npy_ulong __pyx_t_5numpy_uint_t;
  */
 typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":753
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":756
  * ctypedef npy_ulong      uint_t
  * ctypedef npy_ulonglong  ulong_t
  * ctypedef npy_ulonglong  ulonglong_t             # <<<<<<<<<<<<<<
@@ -738,7 +644,7 @@ typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
  */
 typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758
  * ctypedef npy_ulonglong  ulonglong_t
  * 
  * ctypedef npy_intp       intp_t             # <<<<<<<<<<<<<<
@@ -747,7 +653,7 @@ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
  */
 typedef npy_intp __pyx_t_5numpy_intp_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":756
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":759
  * 
  * ctypedef npy_intp       intp_t
  * ctypedef npy_uintp      uintp_t             # <<<<<<<<<<<<<<
@@ -756,7 +662,7 @@ typedef npy_intp __pyx_t_5numpy_intp_t;
  */
 typedef npy_uintp __pyx_t_5numpy_uintp_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":761
  * ctypedef npy_uintp      uintp_t
  * 
  * ctypedef npy_double     float_t             # <<<<<<<<<<<<<<
@@ -765,7 +671,7 @@ typedef npy_uintp __pyx_t_5numpy_uintp_t;
  */
 typedef npy_double __pyx_t_5numpy_float_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":759
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762
  * 
  * ctypedef npy_double     float_t
  * ctypedef npy_double     double_t             # <<<<<<<<<<<<<<
@@ -774,7 +680,7 @@ typedef npy_double __pyx_t_5numpy_float_t;
  */
 typedef npy_double __pyx_t_5numpy_double_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":760
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":763
  * ctypedef npy_double     float_t
  * ctypedef npy_double     double_t
  * ctypedef npy_longdouble longdouble_t             # <<<<<<<<<<<<<<
@@ -805,7 +711,7 @@ typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
 
 /*--- Type declarations ---*/
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":765
  * ctypedef npy_longdouble longdouble_t
  * 
  * ctypedef npy_cfloat      cfloat_t             # <<<<<<<<<<<<<<
@@ -814,7 +720,7 @@ typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
  */
 typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":763
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766
  * 
  * ctypedef npy_cfloat      cfloat_t
  * ctypedef npy_cdouble     cdouble_t             # <<<<<<<<<<<<<<
@@ -823,7 +729,7 @@ typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
  */
 typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":764
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":767
  * ctypedef npy_cfloat      cfloat_t
  * ctypedef npy_cdouble     cdouble_t
  * ctypedef npy_clongdouble clongdouble_t             # <<<<<<<<<<<<<<
@@ -832,7 +738,7 @@ typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
  */
 typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":769
  * ctypedef npy_clongdouble clongdouble_t
  * 
  * ctypedef npy_cdouble     complex_t             # <<<<<<<<<<<<<<
@@ -840,6 +746,8 @@ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
  * cdef inline object PyArray_MultiIterNew1(a):
  */
 typedef npy_cdouble __pyx_t_5numpy_complex_t;
+
+/* --- Runtime support code (head) --- */
 #ifndef CYTHON_REFNANNY
   #define CYTHON_REFNANNY 0
 #endif
@@ -853,7 +761,7 @@ typedef npy_cdouble __pyx_t_5numpy_complex_t;
     void (*FinishContext)(void**);
   } __Pyx_RefNannyAPIStruct;
   static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
-  static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/
+  static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
   #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
 #ifdef WITH_THREAD
   #define __Pyx_RefNannySetupContext(name, acquire_gil) \
@@ -890,7 +798,7 @@ typedef npy_cdouble __pyx_t_5numpy_complex_t;
   #define __Pyx_XDECREF(r) Py_XDECREF(r)
   #define __Pyx_XGOTREF(r)
   #define __Pyx_XGIVEREF(r)
-#endif /* CYTHON_REFNANNY */
+#endif
 #define __Pyx_XDECREF_SET(r, v) do {                            \
         PyObject *tmp = (PyObject *) r;                         \
         r = v; __Pyx_XDECREF(tmp);                              \
@@ -917,48 +825,74 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject
 #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
 #endif
 
-static PyObject *__Pyx_GetBuiltinName(PyObject *name); /*proto*/
+static PyObject *__Pyx_GetBuiltinName(PyObject *name);
 
 static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
-    Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/
+    Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
 
-static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/
+static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
 
 static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \
     PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \
-    const char* function_name); /*proto*/
+    const char* function_name);
 
 static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
-    const char *name, int exact); /*proto*/
+    const char *name, int exact);
 
 static CYTHON_INLINE int  __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
     __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
 static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
 
-static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /*proto*/
+static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name);
 
 #if CYTHON_COMPILING_IN_CPYTHON
-static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); /*proto*/
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
 #else
 #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
 #endif
 
-static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/
+static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
 
-static void __Pyx_RaiseBufferFallbackError(void); /*proto*/
+static void __Pyx_RaiseBufferFallbackError(void);
 
-static void __Pyx_RaiseBufferIndexError(int axis); /*proto*/
+static void __Pyx_RaiseBufferIndexError(int axis);
 
 #define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0)
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
+#endif
+
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
+
 static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
         PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
         PyObject** py_start, PyObject** py_stop, PyObject** py_slice,
         int has_cstart, int has_cstop, int wraparound);
 
-static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
-static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
+static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb);
+static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb);
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
 
-static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/
+#if PY_MAJOR_VERSION >= 3
+static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
+    PyObject *value;
+    value = PyDict_GetItemWithError(d, key);
+    if (unlikely(!value)) {
+        if (!PyErr_Occurred()) {
+            PyObject* args = PyTuple_Pack(1, key);
+            if (likely(args))
+                PyErr_SetObject(PyExc_KeyError, args);
+            Py_XDECREF(args);
+        }
+        return NULL;
+    }
+    Py_INCREF(value);
+    return value;
+}
+#else
+    #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
+#endif
 
 static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
 
@@ -967,6 +901,23 @@ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
 static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
 
 typedef struct {
+    int code_line;
+    PyCodeObject* code_object;
+} __Pyx_CodeObjectCacheEntry;
+struct __Pyx_CodeObjectCache {
+    int count;
+    int max_count;
+    __Pyx_CodeObjectCacheEntry* entries;
+};
+static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
+static PyCodeObject *__pyx_find_code_object(int code_line);
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
+
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+                               int py_line, const char *filename);
+
+typedef struct {
   Py_ssize_t shape, strides, suboffsets;
 } __Pyx_Buf_DimInfo;
 typedef struct {
@@ -991,7 +942,7 @@ typedef struct {
 static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0};
 static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1};
 
-static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /*proto*/
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
 
 static CYTHON_INLINE PyObject* __Pyx_PyInt_From_Py_intptr_t(Py_intptr_t value);
 
@@ -1115,28 +1066,11 @@ static int __Pyx_check_binary_version(void);
 #endif
 #endif
 
-static PyObject *__Pyx_ImportModule(const char *name); /*proto*/
-
-static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict);  /*proto*/
-
-typedef struct {
-    int code_line;
-    PyCodeObject* code_object;
-} __Pyx_CodeObjectCacheEntry;
-struct __Pyx_CodeObjectCache {
-    int count;
-    int max_count;
-    __Pyx_CodeObjectCacheEntry* entries;
-};
-static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
-static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
-static PyCodeObject *__pyx_find_code_object(int code_line);
-static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
+static PyObject *__Pyx_ImportModule(const char *name);
 
-static void __Pyx_AddTraceback(const char *funcname, int c_line,
-                               int py_line, const char *filename); /*proto*/
+static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict);
 
-static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
 
 
 /* Module declarations from 'cpython.buffer' */
@@ -1218,11 +1152,9 @@ static char __pyx_k_zeros_like[] = "zeros_like";
 static char __pyx_k_permutation[] = "permutation";
 static char __pyx_k_RuntimeError[] = "RuntimeError";
 static char __pyx_k_unpacked_idx[] = "unpacked_idx";
-static char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
-static char __pyx_k_pyx_releasebuffer[] = "__pyx_releasebuffer";
 static char __pyx_k_skbio_stats___subsample[] = "skbio.stats.__subsample";
 static char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous";
-static char __pyx_k_Users_jairideout_dev_scikit_bio[] = "/Users/jairideout/dev/scikit-bio/skbio/stats/__subsample.pyx";
+static char __pyx_k_home_evan_biocore_scikit_bio_sk[] = "/home/evan/biocore/scikit-bio/skbio/stats/__subsample.pyx";
 static char __pyx_k_subsample_counts_without_replac[] = "_subsample_counts_without_replacement";
 static char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)";
 static char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd";
@@ -1233,13 +1165,13 @@ static PyObject *__pyx_kp_u_Format_string_allocated_too_shor;
 static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2;
 static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor;
 static PyObject *__pyx_n_s_RuntimeError;
-static PyObject *__pyx_kp_s_Users_jairideout_dev_scikit_bio;
 static PyObject *__pyx_n_s_ValueError;
 static PyObject *__pyx_n_s_cnt;
 static PyObject *__pyx_n_s_counts;
 static PyObject *__pyx_n_s_counts_sum;
 static PyObject *__pyx_n_s_dtype;
 static PyObject *__pyx_n_s_empty;
+static PyObject *__pyx_kp_s_home_evan_biocore_scikit_bio_sk;
 static PyObject *__pyx_n_s_i;
 static PyObject *__pyx_n_s_idx;
 static PyObject *__pyx_n_s_import;
@@ -1252,8 +1184,6 @@ static PyObject *__pyx_n_s_np;
 static PyObject *__pyx_n_s_numpy;
 static PyObject *__pyx_n_s_permutation;
 static PyObject *__pyx_n_s_permuted;
-static PyObject *__pyx_n_s_pyx_getbuffer;
-static PyObject *__pyx_n_s_pyx_releasebuffer;
 static PyObject *__pyx_n_s_random;
 static PyObject *__pyx_n_s_range;
 static PyObject *__pyx_n_s_result;
@@ -1283,7 +1213,7 @@ static PyObject *__pyx_codeobj__8;
 
 /* Python wrapper */
 static PyObject *__pyx_pw_5skbio_5stats_11__subsample_1_subsample_counts_without_replacement(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static PyMethodDef __pyx_mdef_5skbio_5stats_11__subsample_1_subsample_counts_without_replacement = {__Pyx_NAMESTR("_subsample_counts_without_replacement"), (PyCFunction)__pyx_pw_5skbio_5stats_11__subsample_1_subsample_counts_without_replacement, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)};
+static PyMethodDef __pyx_mdef_5skbio_5stats_11__subsample_1_subsample_counts_without_replacement = {"_subsample_counts_without_replacement", (PyCFunction)__pyx_pw_5skbio_5stats_11__subsample_1_subsample_counts_without_replacement, METH_VARARGS|METH_KEYWORDS, 0};
 static PyObject *__pyx_pw_5skbio_5stats_11__subsample_1_subsample_counts_without_replacement(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
   PyArrayObject *__pyx_v_counts = 0;
   PyObject *__pyx_v_n = 0;
@@ -1553,26 +1483,42 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
  * 
  *     result = np.zeros_like(counts)
  */
-  __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_4);
-  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_random); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_permutation); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_INCREF(((PyObject *)__pyx_v_unpacked));
-  PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_unpacked));
-  __Pyx_GIVEREF(((PyObject *)__pyx_v_unpacked));
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_random); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_t_3 = __Pyx_PyObject_GetSlice(__pyx_t_1, 0, 0, NULL, &__pyx_v_n, NULL, 0, 0, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_permutation); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  __pyx_t_1 = NULL;
+  if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_3))) {
+    __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_3);
+    if (likely(__pyx_t_1)) {
+      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
+      __Pyx_INCREF(__pyx_t_1);
+      __Pyx_INCREF(function);
+      __Pyx_DECREF_SET(__pyx_t_3, function);
+    }
+  }
+  if (!__pyx_t_1) {
+    __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_3, ((PyObject *)__pyx_v_unpacked)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+  } else {
+    __pyx_t_2 = PyTuple_New(1+1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_2);
+    PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = NULL;
+    __Pyx_INCREF(((PyObject *)__pyx_v_unpacked));
+    PyTuple_SET_ITEM(__pyx_t_2, 0+1, ((PyObject *)__pyx_v_unpacked));
+    __Pyx_GIVEREF(((PyObject *)__pyx_v_unpacked));
+    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_4);
+    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  }
+  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+  __pyx_t_3 = __Pyx_PyObject_GetSlice(__pyx_t_4, 0, 0, NULL, &__pyx_v_n, NULL, 0, 0, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_3);
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
   if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_t_5 = ((PyArrayObject *)__pyx_t_3);
   {
@@ -1602,22 +1548,38 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
  *     for idx in range(permuted.shape[0]):
  *         result[permuted[idx]] += 1
  */
-  __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_zeros_like); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_INCREF(((PyObject *)__pyx_v_counts));
-  PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_counts));
-  __Pyx_GIVEREF(((PyObject *)__pyx_v_counts));
-  __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_t_5 = ((PyArrayObject *)__pyx_t_4);
+  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros_like); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_2);
+  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+  __pyx_t_4 = NULL;
+  if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_2))) {
+    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
+    if (likely(__pyx_t_4)) {
+      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+      __Pyx_INCREF(__pyx_t_4);
+      __Pyx_INCREF(function);
+      __Pyx_DECREF_SET(__pyx_t_2, function);
+    }
+  }
+  if (!__pyx_t_4) {
+    __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_2, ((PyObject *)__pyx_v_counts)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_3);
+  } else {
+    __pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_1);
+    PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = NULL;
+    __Pyx_INCREF(((PyObject *)__pyx_v_counts));
+    PyTuple_SET_ITEM(__pyx_t_1, 0+1, ((PyObject *)__pyx_v_counts));
+    __Pyx_GIVEREF(((PyObject *)__pyx_v_counts));
+    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_3);
+    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  }
+  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_5 = ((PyArrayObject *)__pyx_t_3);
   {
     __Pyx_BufFmt_StackElem __pyx_stack[1];
     __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_result.rcbuffer->pybuffer);
@@ -1635,8 +1597,8 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
     if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __pyx_t_5 = 0;
-  __pyx_v_result = ((PyArrayObject *)__pyx_t_4);
-  __pyx_t_4 = 0;
+  __pyx_v_result = ((PyArrayObject *)__pyx_t_3);
+  __pyx_t_3 = 0;
 
   /* "skbio/stats/__subsample.pyx":33
  * 
@@ -1727,7 +1689,7 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
   return __pyx_r;
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197
  *         # experimental exception made for __getbuffer__ and __releasebuffer__
  *         # -- the details of this may change.
  *         def __getbuffer__(ndarray self, Py_buffer* info, int flags):             # <<<<<<<<<<<<<<
@@ -1763,13 +1725,11 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __Pyx_RefNannyDeclarations
   int __pyx_t_1;
   int __pyx_t_2;
-  int __pyx_t_3;
-  PyObject *__pyx_t_4 = NULL;
+  PyObject *__pyx_t_3 = NULL;
+  int __pyx_t_4;
   int __pyx_t_5;
-  int __pyx_t_6;
-  int __pyx_t_7;
-  PyObject *__pyx_t_8 = NULL;
-  char *__pyx_t_9;
+  PyObject *__pyx_t_6 = NULL;
+  char *__pyx_t_7;
   int __pyx_lineno = 0;
   const char *__pyx_filename = NULL;
   int __pyx_clineno = 0;
@@ -1779,7 +1739,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __Pyx_GIVEREF(__pyx_v_info->obj);
   }
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":200
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203
  *             # of flags
  * 
  *             if info == NULL: return             # <<<<<<<<<<<<<<
@@ -1792,7 +1752,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     goto __pyx_L0;
   }
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206
  * 
  *             cdef int copy_shape, i, ndim
  *             cdef int endian_detector = 1             # <<<<<<<<<<<<<<
@@ -1801,7 +1761,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_endian_detector = 1;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":204
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":207
  *             cdef int copy_shape, i, ndim
  *             cdef int endian_detector = 1
  *             cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)             # <<<<<<<<<<<<<<
@@ -1810,7 +1770,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209
  *             cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
  * 
  *             ndim = PyArray_NDIM(self)             # <<<<<<<<<<<<<<
@@ -1819,7 +1779,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":208
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211
  *             ndim = PyArray_NDIM(self)
  * 
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
@@ -1829,7 +1789,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
   if (__pyx_t_1) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":212
  * 
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
  *                 copy_shape = 1             # <<<<<<<<<<<<<<
@@ -1841,7 +1801,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   }
   /*else*/ {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214
  *                 copy_shape = 1
  *             else:
  *                 copy_shape = 0             # <<<<<<<<<<<<<<
@@ -1852,83 +1812,87 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   }
   __pyx_L4:;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":213
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":216
  *                 copy_shape = 0
  * 
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)             # <<<<<<<<<<<<<<
  *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not C contiguous")
  */
-  __pyx_t_1 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
-  if (__pyx_t_1) {
+  __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
+  if (__pyx_t_2) {
+  } else {
+    __pyx_t_1 = __pyx_t_2;
+    goto __pyx_L6_bool_binop_done;
+  }
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217
  * 
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):             # <<<<<<<<<<<<<<
  *                 raise ValueError(u"ndarray is not C contiguous")
  * 
  */
-    __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0);
-    __pyx_t_3 = __pyx_t_2;
-  } else {
-    __pyx_t_3 = __pyx_t_1;
-  }
-  if (__pyx_t_3) {
+  __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0);
+  __pyx_t_1 = __pyx_t_2;
+  __pyx_L6_bool_binop_done:;
+  if (__pyx_t_1) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":215
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not C contiguous")             # <<<<<<<<<<<<<<
  * 
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  */
-    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_4);
-    __Pyx_Raise(__pyx_t_4, 0, 0, 0);
-    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_3);
+    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":220
  *                 raise ValueError(u"ndarray is not C contiguous")
  * 
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)             # <<<<<<<<<<<<<<
  *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not Fortran contiguous")
  */
-  __pyx_t_3 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
-  if (__pyx_t_3) {
+  __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
+  if (__pyx_t_2) {
+  } else {
+    __pyx_t_1 = __pyx_t_2;
+    goto __pyx_L9_bool_binop_done;
+  }
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221
  * 
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):             # <<<<<<<<<<<<<<
  *                 raise ValueError(u"ndarray is not Fortran contiguous")
  * 
  */
-    __pyx_t_1 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0);
-    __pyx_t_2 = __pyx_t_1;
-  } else {
-    __pyx_t_2 = __pyx_t_3;
-  }
-  if (__pyx_t_2) {
+  __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0);
+  __pyx_t_1 = __pyx_t_2;
+  __pyx_L9_bool_binop_done:;
+  if (__pyx_t_1) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":219
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not Fortran contiguous")             # <<<<<<<<<<<<<<
  * 
  *             info.buf = PyArray_DATA(self)
  */
-    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __Pyx_GOTREF(__pyx_t_4);
-    __Pyx_Raise(__pyx_t_4, 0, 0, 0);
-    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __Pyx_GOTREF(__pyx_t_3);
+    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":224
  *                 raise ValueError(u"ndarray is not Fortran contiguous")
  * 
  *             info.buf = PyArray_DATA(self)             # <<<<<<<<<<<<<<
@@ -1937,7 +1901,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":225
  * 
  *             info.buf = PyArray_DATA(self)
  *             info.ndim = ndim             # <<<<<<<<<<<<<<
@@ -1946,17 +1910,17 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->ndim = __pyx_v_ndim;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":223
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226
  *             info.buf = PyArray_DATA(self)
  *             info.ndim = ndim
  *             if copy_shape:             # <<<<<<<<<<<<<<
  *                 # Allocate new buffer for strides and shape info.
  *                 # This is allocated as one block, strides first.
  */
-  __pyx_t_2 = (__pyx_v_copy_shape != 0);
-  if (__pyx_t_2) {
+  __pyx_t_1 = (__pyx_v_copy_shape != 0);
+  if (__pyx_t_1) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229
  *                 # Allocate new buffer for strides and shape info.
  *                 # This is allocated as one block, strides first.
  *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)             # <<<<<<<<<<<<<<
@@ -1965,7 +1929,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2)));
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":227
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230
  *                 # This is allocated as one block, strides first.
  *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
  *                 info.shape = info.strides + ndim             # <<<<<<<<<<<<<<
@@ -1974,18 +1938,18 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":228
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":231
  *                 info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
  *                 info.shape = info.strides + ndim
  *                 for i in range(ndim):             # <<<<<<<<<<<<<<
  *                     info.strides[i] = PyArray_STRIDES(self)[i]
  *                     info.shape[i] = PyArray_DIMS(self)[i]
  */
-    __pyx_t_5 = __pyx_v_ndim;
-    for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
-      __pyx_v_i = __pyx_t_6;
+    __pyx_t_4 = __pyx_v_ndim;
+    for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
+      __pyx_v_i = __pyx_t_5;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232
  *                 info.shape = info.strides + ndim
  *                 for i in range(ndim):
  *                     info.strides[i] = PyArray_STRIDES(self)[i]             # <<<<<<<<<<<<<<
@@ -1994,7 +1958,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
       (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233
  *                 for i in range(ndim):
  *                     info.strides[i] = PyArray_STRIDES(self)[i]
  *                     info.shape[i] = PyArray_DIMS(self)[i]             # <<<<<<<<<<<<<<
@@ -2003,11 +1967,11 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
       (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
     }
-    goto __pyx_L7;
+    goto __pyx_L11;
   }
   /*else*/ {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235
  *                     info.shape[i] = PyArray_DIMS(self)[i]
  *             else:
  *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)             # <<<<<<<<<<<<<<
@@ -2016,7 +1980,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236
  *             else:
  *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
  *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)             # <<<<<<<<<<<<<<
@@ -2025,9 +1989,9 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self));
   }
-  __pyx_L7:;
+  __pyx_L11:;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":234
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":237
  *                 info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
  *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)
  *             info.suboffsets = NULL             # <<<<<<<<<<<<<<
@@ -2036,7 +2000,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->suboffsets = NULL;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":238
  *                 info.shape = <Py_ssize_t*>PyArray_DIMS(self)
  *             info.suboffsets = NULL
  *             info.itemsize = PyArray_ITEMSIZE(self)             # <<<<<<<<<<<<<<
@@ -2045,7 +2009,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239
  *             info.suboffsets = NULL
  *             info.itemsize = PyArray_ITEMSIZE(self)
  *             info.readonly = not PyArray_ISWRITEABLE(self)             # <<<<<<<<<<<<<<
@@ -2054,7 +2018,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":242
  * 
  *             cdef int t
  *             cdef char* f = NULL             # <<<<<<<<<<<<<<
@@ -2063,19 +2027,19 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_f = NULL;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":240
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":243
  *             cdef int t
  *             cdef char* f = NULL
  *             cdef dtype descr = self.descr             # <<<<<<<<<<<<<<
  *             cdef list stack
  *             cdef int offset
  */
-  __pyx_t_4 = ((PyObject *)__pyx_v_self->descr);
-  __Pyx_INCREF(__pyx_t_4);
-  __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4);
-  __pyx_t_4 = 0;
+  __pyx_t_3 = ((PyObject *)__pyx_v_self->descr);
+  __Pyx_INCREF(__pyx_t_3);
+  __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3);
+  __pyx_t_3 = 0;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":244
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":247
  *             cdef int offset
  * 
  *             cdef bint hasfields = PyDataType_HASFIELDS(descr)             # <<<<<<<<<<<<<<
@@ -2084,7 +2048,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":246
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":249
  *             cdef bint hasfields = PyDataType_HASFIELDS(descr)
  * 
  *             if not hasfields and not copy_shape:             # <<<<<<<<<<<<<<
@@ -2093,14 +2057,16 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
   __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0);
   if (__pyx_t_2) {
-    __pyx_t_3 = ((!(__pyx_v_copy_shape != 0)) != 0);
-    __pyx_t_1 = __pyx_t_3;
   } else {
     __pyx_t_1 = __pyx_t_2;
+    goto __pyx_L15_bool_binop_done;
   }
+  __pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0);
+  __pyx_t_1 = __pyx_t_2;
+  __pyx_L15_bool_binop_done:;
   if (__pyx_t_1) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":248
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":251
  *             if not hasfields and not copy_shape:
  *                 # do not call releasebuffer
  *                 info.obj = None             # <<<<<<<<<<<<<<
@@ -2112,11 +2078,11 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __Pyx_GOTREF(__pyx_v_info->obj);
     __Pyx_DECREF(__pyx_v_info->obj);
     __pyx_v_info->obj = Py_None;
-    goto __pyx_L10;
+    goto __pyx_L14;
   }
   /*else*/ {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":251
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":254
  *             else:
  *                 # need to call releasebuffer
  *                 info.obj = self             # <<<<<<<<<<<<<<
@@ -2129,9 +2095,9 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     __Pyx_DECREF(__pyx_v_info->obj);
     __pyx_v_info->obj = ((PyObject *)__pyx_v_self);
   }
-  __pyx_L10:;
+  __pyx_L14:;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":253
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256
  *                 info.obj = self
  * 
  *             if not hasfields:             # <<<<<<<<<<<<<<
@@ -2141,66 +2107,69 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0);
   if (__pyx_t_1) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":254
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
  * 
  *             if not hasfields:
  *                 t = descr.type_num             # <<<<<<<<<<<<<<
  *                 if ((descr.byteorder == c'>' and little_endian) or
  *                     (descr.byteorder == c'<' and not little_endian)):
  */
-    __pyx_t_5 = __pyx_v_descr->type_num;
-    __pyx_v_t = __pyx_t_5;
+    __pyx_t_4 = __pyx_v_descr->type_num;
+    __pyx_v_t = __pyx_t_4;
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":255
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258
  *             if not hasfields:
  *                 t = descr.type_num
  *                 if ((descr.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
  *                     (descr.byteorder == c'<' and not little_endian)):
  *                     raise ValueError(u"Non-native byte order not supported")
  */
-    __pyx_t_1 = ((__pyx_v_descr->byteorder == '>') != 0);
-    if (__pyx_t_1) {
-      __pyx_t_2 = (__pyx_v_little_endian != 0);
+    __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0);
+    if (!__pyx_t_2) {
+      goto __pyx_L20_next_or;
     } else {
-      __pyx_t_2 = __pyx_t_1;
     }
+    __pyx_t_2 = (__pyx_v_little_endian != 0);
     if (!__pyx_t_2) {
+    } else {
+      __pyx_t_1 = __pyx_t_2;
+      goto __pyx_L19_bool_binop_done;
+    }
+    __pyx_L20_next_or:;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259
  *                 t = descr.type_num
  *                 if ((descr.byteorder == c'>' and little_endian) or
  *                     (descr.byteorder == c'<' and not little_endian)):             # <<<<<<<<<<<<<<
  *                     raise ValueError(u"Non-native byte order not supported")
  *                 if   t == NPY_BYTE:        f = "b"
  */
-      __pyx_t_1 = ((__pyx_v_descr->byteorder == '<') != 0);
-      if (__pyx_t_1) {
-        __pyx_t_3 = ((!(__pyx_v_little_endian != 0)) != 0);
-        __pyx_t_7 = __pyx_t_3;
-      } else {
-        __pyx_t_7 = __pyx_t_1;
-      }
-      __pyx_t_1 = __pyx_t_7;
+    __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0);
+    if (__pyx_t_2) {
     } else {
       __pyx_t_1 = __pyx_t_2;
+      goto __pyx_L19_bool_binop_done;
     }
+    __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0);
+    __pyx_t_1 = __pyx_t_2;
+    __pyx_L19_bool_binop_done:;
     if (__pyx_t_1) {
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260
  *                 if ((descr.byteorder == c'>' and little_endian) or
  *                     (descr.byteorder == c'<' and not little_endian)):
  *                     raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"
  */
-      __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_4);
-      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
-      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277
  *                 elif t == NPY_CDOUBLE:     f = "Zd"
  *                 elif t == NPY_CLONGDOUBLE: f = "Zg"
  *                 elif t == NPY_OBJECT:      f = "O"             # <<<<<<<<<<<<<<
@@ -2209,7 +2178,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     switch (__pyx_v_t) {
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261
  *                     (descr.byteorder == c'<' and not little_endian)):
  *                     raise ValueError(u"Non-native byte order not supported")
  *                 if   t == NPY_BYTE:        f = "b"             # <<<<<<<<<<<<<<
@@ -2220,7 +2189,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_b;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262
  *                     raise ValueError(u"Non-native byte order not supported")
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"             # <<<<<<<<<<<<<<
@@ -2231,7 +2200,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_B;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"
  *                 elif t == NPY_SHORT:       f = "h"             # <<<<<<<<<<<<<<
@@ -2242,7 +2211,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_h;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264
  *                 elif t == NPY_UBYTE:       f = "B"
  *                 elif t == NPY_SHORT:       f = "h"
  *                 elif t == NPY_USHORT:      f = "H"             # <<<<<<<<<<<<<<
@@ -2253,7 +2222,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_H;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265
  *                 elif t == NPY_SHORT:       f = "h"
  *                 elif t == NPY_USHORT:      f = "H"
  *                 elif t == NPY_INT:         f = "i"             # <<<<<<<<<<<<<<
@@ -2264,7 +2233,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_i;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266
  *                 elif t == NPY_USHORT:      f = "H"
  *                 elif t == NPY_INT:         f = "i"
  *                 elif t == NPY_UINT:        f = "I"             # <<<<<<<<<<<<<<
@@ -2275,7 +2244,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_I;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267
  *                 elif t == NPY_INT:         f = "i"
  *                 elif t == NPY_UINT:        f = "I"
  *                 elif t == NPY_LONG:        f = "l"             # <<<<<<<<<<<<<<
@@ -2286,7 +2255,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_l;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268
  *                 elif t == NPY_UINT:        f = "I"
  *                 elif t == NPY_LONG:        f = "l"
  *                 elif t == NPY_ULONG:       f = "L"             # <<<<<<<<<<<<<<
@@ -2297,7 +2266,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_L;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269
  *                 elif t == NPY_LONG:        f = "l"
  *                 elif t == NPY_ULONG:       f = "L"
  *                 elif t == NPY_LONGLONG:    f = "q"             # <<<<<<<<<<<<<<
@@ -2308,7 +2277,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_q;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270
  *                 elif t == NPY_ULONG:       f = "L"
  *                 elif t == NPY_LONGLONG:    f = "q"
  *                 elif t == NPY_ULONGLONG:   f = "Q"             # <<<<<<<<<<<<<<
@@ -2319,7 +2288,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Q;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271
  *                 elif t == NPY_LONGLONG:    f = "q"
  *                 elif t == NPY_ULONGLONG:   f = "Q"
  *                 elif t == NPY_FLOAT:       f = "f"             # <<<<<<<<<<<<<<
@@ -2330,7 +2299,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_f;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272
  *                 elif t == NPY_ULONGLONG:   f = "Q"
  *                 elif t == NPY_FLOAT:       f = "f"
  *                 elif t == NPY_DOUBLE:      f = "d"             # <<<<<<<<<<<<<<
@@ -2341,7 +2310,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_d;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273
  *                 elif t == NPY_FLOAT:       f = "f"
  *                 elif t == NPY_DOUBLE:      f = "d"
  *                 elif t == NPY_LONGDOUBLE:  f = "g"             # <<<<<<<<<<<<<<
@@ -2352,7 +2321,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_g;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
  *                 elif t == NPY_DOUBLE:      f = "d"
  *                 elif t == NPY_LONGDOUBLE:  f = "g"
  *                 elif t == NPY_CFLOAT:      f = "Zf"             # <<<<<<<<<<<<<<
@@ -2363,7 +2332,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Zf;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":275
  *                 elif t == NPY_LONGDOUBLE:  f = "g"
  *                 elif t == NPY_CFLOAT:      f = "Zf"
  *                 elif t == NPY_CDOUBLE:     f = "Zd"             # <<<<<<<<<<<<<<
@@ -2374,7 +2343,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Zd;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276
  *                 elif t == NPY_CFLOAT:      f = "Zf"
  *                 elif t == NPY_CDOUBLE:     f = "Zd"
  *                 elif t == NPY_CLONGDOUBLE: f = "Zg"             # <<<<<<<<<<<<<<
@@ -2385,7 +2354,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       __pyx_v_f = __pyx_k_Zg;
       break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277
  *                 elif t == NPY_CDOUBLE:     f = "Zd"
  *                 elif t == NPY_CLONGDOUBLE: f = "Zg"
  *                 elif t == NPY_OBJECT:      f = "O"             # <<<<<<<<<<<<<<
@@ -2397,33 +2366,33 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
       break;
       default:
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":279
  *                 elif t == NPY_OBJECT:      f = "O"
  *                 else:
  *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)             # <<<<<<<<<<<<<<
  *                 info.format = f
  *                 return
  */
-      __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_8 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_8);
-      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_4);
-      PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_8);
-      __Pyx_GIVEREF(__pyx_t_8);
-      __pyx_t_8 = 0;
-      __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __Pyx_GOTREF(__pyx_t_8);
-      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __Pyx_Raise(__pyx_t_8, 0, 0, 0);
-      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_6);
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_3);
+      PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
+      __Pyx_GIVEREF(__pyx_t_6);
+      __pyx_t_6 = 0;
+      __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_GOTREF(__pyx_t_6);
+      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+      __Pyx_Raise(__pyx_t_6, 0, 0, 0);
+      __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       break;
     }
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280
  *                 else:
  *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  *                 info.format = f             # <<<<<<<<<<<<<<
@@ -2432,7 +2401,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->format = __pyx_v_f;
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":278
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":281
  *                     raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  *                 info.format = f
  *                 return             # <<<<<<<<<<<<<<
@@ -2444,7 +2413,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   }
   /*else*/ {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283
  *                 return
  *             else:
  *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)             # <<<<<<<<<<<<<<
@@ -2453,7 +2422,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_info->format = ((char *)malloc(255));
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":281
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":284
  *             else:
  *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)
  *                 info.format[0] = c'^' # Native data types, manual alignment             # <<<<<<<<<<<<<<
@@ -2462,7 +2431,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     (__pyx_v_info->format[0]) = '^';
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":282
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":285
  *                 info.format = <char*>stdlib.malloc(_buffer_format_string_len)
  *                 info.format[0] = c'^' # Native data types, manual alignment
  *                 offset = 0             # <<<<<<<<<<<<<<
@@ -2471,17 +2440,17 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
  */
     __pyx_v_offset = 0;
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":286
  *                 info.format[0] = c'^' # Native data types, manual alignment
  *                 offset = 0
  *                 f = _util_dtypestring(descr, info.format + 1,             # <<<<<<<<<<<<<<
  *                                       info.format + _buffer_format_string_len,
  *                                       &offset)
  */
-    __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-    __pyx_v_f = __pyx_t_9;
+    __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_v_f = __pyx_t_7;
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":286
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":289
  *                                       info.format + _buffer_format_string_len,
  *                                       &offset)
  *                 f[0] = c'\0' # Terminate format string             # <<<<<<<<<<<<<<
@@ -2491,7 +2460,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
     (__pyx_v_f[0]) = '\x00';
   }
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197
  *         # experimental exception made for __getbuffer__ and __releasebuffer__
  *         # -- the details of this may change.
  *         def __getbuffer__(ndarray self, Py_buffer* info, int flags):             # <<<<<<<<<<<<<<
@@ -2503,8 +2472,8 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   __pyx_r = 0;
   goto __pyx_L0;
   __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_XDECREF(__pyx_t_8);
+  __Pyx_XDECREF(__pyx_t_3);
+  __Pyx_XDECREF(__pyx_t_6);
   __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __pyx_r = -1;
   if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
@@ -2523,7 +2492,7 @@ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, P
   return __pyx_r;
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
  *                 f[0] = c'\0' # Terminate format string
  * 
  *         def __releasebuffer__(ndarray self, Py_buffer* info):             # <<<<<<<<<<<<<<
@@ -2547,7 +2516,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   int __pyx_t_1;
   __Pyx_RefNannySetupContext("__releasebuffer__", 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":289
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292
  * 
  *         def __releasebuffer__(ndarray self, Py_buffer* info):
  *             if PyArray_HASFIELDS(self):             # <<<<<<<<<<<<<<
@@ -2557,7 +2526,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
   if (__pyx_t_1) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":290
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":293
  *         def __releasebuffer__(ndarray self, Py_buffer* info):
  *             if PyArray_HASFIELDS(self):
  *                 stdlib.free(info.format)             # <<<<<<<<<<<<<<
@@ -2569,7 +2538,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   }
   __pyx_L3:;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":294
  *             if PyArray_HASFIELDS(self):
  *                 stdlib.free(info.format)
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
@@ -2579,7 +2548,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
   if (__pyx_t_1) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":295
  *                 stdlib.free(info.format)
  *             if sizeof(npy_intp) != sizeof(Py_ssize_t):
  *                 stdlib.free(info.strides)             # <<<<<<<<<<<<<<
@@ -2591,7 +2560,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   }
   __pyx_L4:;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
  *                 f[0] = c'\0' # Terminate format string
  * 
  *         def __releasebuffer__(ndarray self, Py_buffer* info):             # <<<<<<<<<<<<<<
@@ -2603,7 +2572,7 @@ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_s
   __Pyx_RefNannyFinishContext();
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
  * ctypedef npy_cdouble     complex_t
  * 
  * cdef inline object PyArray_MultiIterNew1(a):             # <<<<<<<<<<<<<<
@@ -2620,7 +2589,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":769
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":772
  * 
  * cdef inline object PyArray_MultiIterNew1(a):
  *     return PyArray_MultiIterNew(1, <void*>a)             # <<<<<<<<<<<<<<
@@ -2628,13 +2597,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__
  * cdef inline object PyArray_MultiIterNew2(a, b):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
  * ctypedef npy_cdouble     complex_t
  * 
  * cdef inline object PyArray_MultiIterNew1(a):             # <<<<<<<<<<<<<<
@@ -2653,7 +2622,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__
   return __pyx_r;
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
  *     return PyArray_MultiIterNew(1, <void*>a)
  * 
  * cdef inline object PyArray_MultiIterNew2(a, b):             # <<<<<<<<<<<<<<
@@ -2670,7 +2639,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":772
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":775
  * 
  * cdef inline object PyArray_MultiIterNew2(a, b):
  *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)             # <<<<<<<<<<<<<<
@@ -2678,13 +2647,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__
  * cdef inline object PyArray_MultiIterNew3(a, b, c):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
  *     return PyArray_MultiIterNew(1, <void*>a)
  * 
  * cdef inline object PyArray_MultiIterNew2(a, b):             # <<<<<<<<<<<<<<
@@ -2703,7 +2672,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__
   return __pyx_r;
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
  *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
  * 
  * cdef inline object PyArray_MultiIterNew3(a, b, c):             # <<<<<<<<<<<<<<
@@ -2720,7 +2689,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":775
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":778
  * 
  * cdef inline object PyArray_MultiIterNew3(a, b, c):
  *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)             # <<<<<<<<<<<<<<
@@ -2728,13 +2697,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
  *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
  * 
  * cdef inline object PyArray_MultiIterNew3(a, b, c):             # <<<<<<<<<<<<<<
@@ -2753,7 +2722,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__
   return __pyx_r;
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
  *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
  * 
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):             # <<<<<<<<<<<<<<
@@ -2770,7 +2739,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":778
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":781
  * 
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
  *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)             # <<<<<<<<<<<<<<
@@ -2778,13 +2747,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
  *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
  * 
  * cdef inline object PyArray_MultiIterNew4(a, b, c, d):             # <<<<<<<<<<<<<<
@@ -2803,7 +2772,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__
   return __pyx_r;
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
  *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
  * 
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):             # <<<<<<<<<<<<<<
@@ -2820,7 +2789,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":781
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":784
  * 
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
  *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)             # <<<<<<<<<<<<<<
@@ -2828,13 +2797,13 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__
  * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 784; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
   goto __pyx_L0;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
  *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
  * 
  * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):             # <<<<<<<<<<<<<<
@@ -2853,7 +2822,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__
   return __pyx_r;
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":786
  *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
  * 
  * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:             # <<<<<<<<<<<<<<
@@ -2878,16 +2847,14 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
   int __pyx_t_5;
   int __pyx_t_6;
   int __pyx_t_7;
-  int __pyx_t_8;
-  int __pyx_t_9;
-  long __pyx_t_10;
-  char *__pyx_t_11;
+  long __pyx_t_8;
+  char *__pyx_t_9;
   int __pyx_lineno = 0;
   const char *__pyx_filename = NULL;
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("_util_dtypestring", 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":790
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":793
  *     cdef int delta_offset
  *     cdef tuple i
  *     cdef int endian_detector = 1             # <<<<<<<<<<<<<<
@@ -2896,7 +2863,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
   __pyx_v_endian_detector = 1;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":791
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794
  *     cdef tuple i
  *     cdef int endian_detector = 1
  *     cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)             # <<<<<<<<<<<<<<
@@ -2905,7 +2872,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
   __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":797
  *     cdef tuple fields
  * 
  *     for childname in descr.names:             # <<<<<<<<<<<<<<
@@ -2914,33 +2881,37 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
   if (unlikely(__pyx_v_descr->names == Py_None)) {
     PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
-    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
   for (;;) {
     if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
     #if CYTHON_COMPILING_IN_CPYTHON
-    __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     #else
-    __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     #endif
     __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
     __pyx_t_3 = 0;
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":795
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798
  * 
  *     for childname in descr.names:
  *         fields = descr.fields[childname]             # <<<<<<<<<<<<<<
  *         child, new_offset = fields
  * 
  */
-    __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    if (unlikely(__pyx_v_descr->fields == Py_None)) {
+      PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    }
+    __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
     __Pyx_GOTREF(__pyx_t_3);
-    if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
     __pyx_t_3 = 0;
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":796
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799
  *     for childname in descr.names:
  *         fields = descr.fields[childname]
  *         child, new_offset = fields             # <<<<<<<<<<<<<<
@@ -2957,7 +2928,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
       if (unlikely(size != 2)) {
         if (size > 2) __Pyx_RaiseTooManyValuesError(2);
         else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
-        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       }
       #if CYTHON_COMPILING_IN_CPYTHON
       __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); 
@@ -2965,101 +2936,104 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
       __Pyx_INCREF(__pyx_t_3);
       __Pyx_INCREF(__pyx_t_4);
       #else
-      __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
       #endif
     } else {
-      __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
-    if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
     __pyx_t_3 = 0;
     __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
     __pyx_t_4 = 0;
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801
  *         child, new_offset = fields
  * 
  *         if (end - f) - <int>(new_offset - offset[0]) < 15:             # <<<<<<<<<<<<<<
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
  * 
  */
-    __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
     __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-    __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
     __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
     if (__pyx_t_6) {
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
  * 
  *         if (end - f) - <int>(new_offset - offset[0]) < 15:
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")             # <<<<<<<<<<<<<<
  * 
  *         if ((child.byteorder == c'>' and little_endian) or
  */
-      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
       __Pyx_Raise(__pyx_t_3, 0, 0, 0);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":804
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
  * 
  *         if ((child.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
  *             (child.byteorder == c'<' and not little_endian)):
  *             raise ValueError(u"Non-native byte order not supported")
  */
-    __pyx_t_6 = ((__pyx_v_child->byteorder == '>') != 0);
-    if (__pyx_t_6) {
-      __pyx_t_7 = (__pyx_v_little_endian != 0);
+    __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0);
+    if (!__pyx_t_7) {
+      goto __pyx_L8_next_or;
     } else {
-      __pyx_t_7 = __pyx_t_6;
     }
+    __pyx_t_7 = (__pyx_v_little_endian != 0);
     if (!__pyx_t_7) {
+    } else {
+      __pyx_t_6 = __pyx_t_7;
+      goto __pyx_L7_bool_binop_done;
+    }
+    __pyx_L8_next_or:;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":805
  * 
  *         if ((child.byteorder == c'>' and little_endian) or
  *             (child.byteorder == c'<' and not little_endian)):             # <<<<<<<<<<<<<<
  *             raise ValueError(u"Non-native byte order not supported")
  *             # One could encode it in the format string and have Cython
  */
-      __pyx_t_6 = ((__pyx_v_child->byteorder == '<') != 0);
-      if (__pyx_t_6) {
-        __pyx_t_8 = ((!(__pyx_v_little_endian != 0)) != 0);
-        __pyx_t_9 = __pyx_t_8;
-      } else {
-        __pyx_t_9 = __pyx_t_6;
-      }
-      __pyx_t_6 = __pyx_t_9;
+    __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0);
+    if (__pyx_t_7) {
     } else {
       __pyx_t_6 = __pyx_t_7;
+      goto __pyx_L7_bool_binop_done;
     }
+    __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0);
+    __pyx_t_6 = __pyx_t_7;
+    __pyx_L7_bool_binop_done:;
     if (__pyx_t_6) {
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":806
  *         if ((child.byteorder == c'>' and little_endian) or
  *             (child.byteorder == c'<' and not little_endian)):
  *             raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *             # One could encode it in the format string and have Cython
  *             # complain instead, BUT: < and > in format strings also imply
  */
-      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
       __Pyx_Raise(__pyx_t_3, 0, 0, 0);
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":813
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816
  * 
  *         # Output padding bytes
  *         while offset[0] < new_offset:             # <<<<<<<<<<<<<<
@@ -3067,15 +3041,15 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  *             f += 1
  */
     while (1) {
-      __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (!__pyx_t_6) break;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":814
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":817
  *         # Output padding bytes
  *         while offset[0] < new_offset:
  *             f[0] = 120 # "x"; pad byte             # <<<<<<<<<<<<<<
@@ -3084,7 +3058,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
       (__pyx_v_f[0]) = 120;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":815
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818
  *         while offset[0] < new_offset:
  *             f[0] = 120 # "x"; pad byte
  *             f += 1             # <<<<<<<<<<<<<<
@@ -3093,28 +3067,28 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  */
       __pyx_v_f = (__pyx_v_f + 1);
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":819
  *             f[0] = 120 # "x"; pad byte
  *             f += 1
  *             offset[0] += 1             # <<<<<<<<<<<<<<
  * 
  *         offset[0] += child.itemsize
  */
-      __pyx_t_10 = 0;
-      (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1);
+      __pyx_t_8 = 0;
+      (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1);
     }
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821
  *             offset[0] += 1
  * 
  *         offset[0] += child.itemsize             # <<<<<<<<<<<<<<
  * 
  *         if not PyDataType_HASFIELDS(child):
  */
-    __pyx_t_10 = 0;
-    (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize);
+    __pyx_t_8 = 0;
+    (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize);
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":820
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823
  *         offset[0] += child.itemsize
  * 
  *         if not PyDataType_HASFIELDS(child):             # <<<<<<<<<<<<<<
@@ -3124,19 +3098,19 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
     __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
     if (__pyx_t_6) {
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":824
  * 
  *         if not PyDataType_HASFIELDS(child):
  *             t = child.type_num             # <<<<<<<<<<<<<<
  *             if end - f < 5:
  *                 raise RuntimeError(u"Format string allocated too short.")
  */
-      __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
       __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
       __pyx_t_4 = 0;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":822
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":825
  *         if not PyDataType_HASFIELDS(child):
  *             t = child.type_num
  *             if end - f < 5:             # <<<<<<<<<<<<<<
@@ -3146,357 +3120,357 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
       __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
       if (__pyx_t_6) {
 
-        /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823
+        /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826
  *             t = child.type_num
  *             if end - f < 5:
  *                 raise RuntimeError(u"Format string allocated too short.")             # <<<<<<<<<<<<<<
  * 
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  */
-        __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_4);
         __Pyx_Raise(__pyx_t_4, 0, 0, 0);
         __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829
  * 
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  *             if   t == NPY_BYTE:        f[0] =  98 #"b"             # <<<<<<<<<<<<<<
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 98;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":827
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  *             if   t == NPY_BYTE:        f[0] =  98 #"b"
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"             # <<<<<<<<<<<<<<
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 66;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":828
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831
  *             if   t == NPY_BYTE:        f[0] =  98 #"b"
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"             # <<<<<<<<<<<<<<
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 104;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832
  *             elif t == NPY_UBYTE:       f[0] =  66 #"B"
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"             # <<<<<<<<<<<<<<
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 72;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833
  *             elif t == NPY_SHORT:       f[0] = 104 #"h"
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  *             elif t == NPY_INT:         f[0] = 105 #"i"             # <<<<<<<<<<<<<<
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 105;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834
  *             elif t == NPY_USHORT:      f[0] =  72 #"H"
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  *             elif t == NPY_UINT:        f[0] =  73 #"I"             # <<<<<<<<<<<<<<
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 73;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835
  *             elif t == NPY_INT:         f[0] = 105 #"i"
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  *             elif t == NPY_LONG:        f[0] = 108 #"l"             # <<<<<<<<<<<<<<
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 108;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836
  *             elif t == NPY_UINT:        f[0] =  73 #"I"
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"             # <<<<<<<<<<<<<<
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 76;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837
  *             elif t == NPY_LONG:        f[0] = 108 #"l"
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"             # <<<<<<<<<<<<<<
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 113;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838
  *             elif t == NPY_ULONG:       f[0] = 76  #"L"
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"             # <<<<<<<<<<<<<<
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 81;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839
  *             elif t == NPY_LONGLONG:    f[0] = 113 #"q"
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"             # <<<<<<<<<<<<<<
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 102;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840
  *             elif t == NPY_ULONGLONG:   f[0] = 81  #"Q"
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"             # <<<<<<<<<<<<<<
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 100;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841
  *             elif t == NPY_FLOAT:       f[0] = 102 #"f"
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"             # <<<<<<<<<<<<<<
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 103;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842
  *             elif t == NPY_DOUBLE:      f[0] = 100 #"d"
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf             # <<<<<<<<<<<<<<
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 90;
         (__pyx_v_f[1]) = 102;
         __pyx_v_f = (__pyx_v_f + 1);
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":843
  *             elif t == NPY_LONGDOUBLE:  f[0] = 103 #"g"
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd             # <<<<<<<<<<<<<<
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 90;
         (__pyx_v_f[1]) = 100;
         __pyx_v_f = (__pyx_v_f + 1);
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844
  *             elif t == NPY_CFLOAT:      f[0] = 90; f[1] = 102; f += 1 # Zf
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg             # <<<<<<<<<<<<<<
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
  *             else:
  */
-      __pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 90;
         (__pyx_v_f[1]) = 103;
         __pyx_v_f = (__pyx_v_f + 1);
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845
  *             elif t == NPY_CDOUBLE:     f[0] = 90; f[1] = 100; f += 1 # Zd
  *             elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"             # <<<<<<<<<<<<<<
  *             else:
  *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  */
-      __pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_GOTREF(__pyx_t_4);
-      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
       if (__pyx_t_6) {
         (__pyx_v_f[0]) = 79;
-        goto __pyx_L11;
+        goto __pyx_L15;
       }
       /*else*/ {
 
-        /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844
+        /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":847
  *             elif t == NPY_OBJECT:      f[0] = 79 #"O"
  *             else:
  *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)             # <<<<<<<<<<<<<<
  *             f += 1
  *         else:
  */
-        __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_3);
-        __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_4);
         PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
         __Pyx_GIVEREF(__pyx_t_3);
         __pyx_t_3 = 0;
-        __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         __Pyx_GOTREF(__pyx_t_3);
         __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
         __Pyx_Raise(__pyx_t_3, 0, 0, 0);
         __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       }
-      __pyx_L11:;
+      __pyx_L15:;
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":848
  *             else:
  *                 raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
  *             f += 1             # <<<<<<<<<<<<<<
@@ -3504,25 +3478,33 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
  *             # Cython ignores struct boundary information ("T{...}"),
  */
       __pyx_v_f = (__pyx_v_f + 1);
-      goto __pyx_L9;
+      goto __pyx_L13;
     }
     /*else*/ {
 
-      /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":849
+      /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":852
  *             # Cython ignores struct boundary information ("T{...}"),
  *             # so don't output it
  *             f = _util_dtypestring(child, f, end, offset)             # <<<<<<<<<<<<<<
  *     return f
  * 
  */
-      __pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-      __pyx_v_f = __pyx_t_11;
+      __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 852; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      __pyx_v_f = __pyx_t_9;
     }
-    __pyx_L9:;
+    __pyx_L13:;
+
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":797
+ *     cdef tuple fields
+ * 
+ *     for childname in descr.names:             # <<<<<<<<<<<<<<
+ *         fields = descr.fields[childname]
+ *         child, new_offset = fields
+ */
   }
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":850
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":853
  *             # so don't output it
  *             f = _util_dtypestring(child, f, end, offset)
  *     return f             # <<<<<<<<<<<<<<
@@ -3532,7 +3514,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
   __pyx_r = __pyx_v_f;
   goto __pyx_L0;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":786
  *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
  * 
  * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:             # <<<<<<<<<<<<<<
@@ -3557,7 +3539,7 @@ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx
   return __pyx_r;
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969
  * 
  * 
  * cdef inline void set_array_base(ndarray arr, object base):             # <<<<<<<<<<<<<<
@@ -3572,7 +3554,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   int __pyx_t_2;
   __Pyx_RefNannySetupContext("set_array_base", 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":968
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971
  * cdef inline void set_array_base(ndarray arr, object base):
  *      cdef PyObject* baseptr
  *      if base is None:             # <<<<<<<<<<<<<<
@@ -3583,7 +3565,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   __pyx_t_2 = (__pyx_t_1 != 0);
   if (__pyx_t_2) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972
  *      cdef PyObject* baseptr
  *      if base is None:
  *          baseptr = NULL             # <<<<<<<<<<<<<<
@@ -3595,7 +3577,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   }
   /*else*/ {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974
  *          baseptr = NULL
  *      else:
  *          Py_INCREF(base) # important to do this before decref below!             # <<<<<<<<<<<<<<
@@ -3604,7 +3586,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
  */
     Py_INCREF(__pyx_v_base);
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":975
  *      else:
  *          Py_INCREF(base) # important to do this before decref below!
  *          baseptr = <PyObject*>base             # <<<<<<<<<<<<<<
@@ -3615,7 +3597,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   }
   __pyx_L3:;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":973
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
  *          Py_INCREF(base) # important to do this before decref below!
  *          baseptr = <PyObject*>base
  *      Py_XDECREF(arr.base)             # <<<<<<<<<<<<<<
@@ -3624,7 +3606,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
  */
   Py_XDECREF(__pyx_v_arr->base);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977
  *          baseptr = <PyObject*>base
  *      Py_XDECREF(arr.base)
  *      arr.base = baseptr             # <<<<<<<<<<<<<<
@@ -3633,7 +3615,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
  */
   __pyx_v_arr->base = __pyx_v_baseptr;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969
  * 
  * 
  * cdef inline void set_array_base(ndarray arr, object base):             # <<<<<<<<<<<<<<
@@ -3645,7 +3627,7 @@ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_a
   __Pyx_RefNannyFinishContext();
 }
 
-/* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
+/* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":979
  *      arr.base = baseptr
  * 
  * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
@@ -3659,7 +3641,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__py
   int __pyx_t_1;
   __Pyx_RefNannySetupContext("get_array_base", 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980
  * 
  * cdef inline object get_array_base(ndarray arr):
  *     if arr.base is NULL:             # <<<<<<<<<<<<<<
@@ -3669,7 +3651,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__py
   __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0);
   if (__pyx_t_1) {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":978
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":981
  * cdef inline object get_array_base(ndarray arr):
  *     if arr.base is NULL:
  *         return None             # <<<<<<<<<<<<<<
@@ -3683,7 +3665,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__py
   }
   /*else*/ {
 
-    /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980
+    /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":983
  *         return None
  *     else:
  *         return <object>arr.base             # <<<<<<<<<<<<<<
@@ -3694,7 +3676,7 @@ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__py
     goto __pyx_L0;
   }
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":979
  *      arr.base = baseptr
  * 
  * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
@@ -3720,7 +3702,7 @@ static struct PyModuleDef __pyx_moduledef = {
   #else
     PyModuleDef_HEAD_INIT,
   #endif
-    __Pyx_NAMESTR("__subsample"),
+    "__subsample",
     0, /* m_doc */
     -1, /* m_size */
     __pyx_methods /* m_methods */,
@@ -3736,13 +3718,13 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
   {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0},
   {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0},
   {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1},
-  {&__pyx_kp_s_Users_jairideout_dev_scikit_bio, __pyx_k_Users_jairideout_dev_scikit_bio, sizeof(__pyx_k_Users_jairideout_dev_scikit_bio), 0, 0, 1, 0},
   {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
   {&__pyx_n_s_cnt, __pyx_k_cnt, sizeof(__pyx_k_cnt), 0, 0, 1, 1},
   {&__pyx_n_s_counts, __pyx_k_counts, sizeof(__pyx_k_counts), 0, 0, 1, 1},
   {&__pyx_n_s_counts_sum, __pyx_k_counts_sum, sizeof(__pyx_k_counts_sum), 0, 0, 1, 1},
   {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
   {&__pyx_n_s_empty, __pyx_k_empty, sizeof(__pyx_k_empty), 0, 0, 1, 1},
+  {&__pyx_kp_s_home_evan_biocore_scikit_bio_sk, __pyx_k_home_evan_biocore_scikit_bio_sk, sizeof(__pyx_k_home_evan_biocore_scikit_bio_sk), 0, 0, 1, 0},
   {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
   {&__pyx_n_s_idx, __pyx_k_idx, sizeof(__pyx_k_idx), 0, 0, 1, 1},
   {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
@@ -3755,8 +3737,6 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
   {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
   {&__pyx_n_s_permutation, __pyx_k_permutation, sizeof(__pyx_k_permutation), 0, 0, 1, 1},
   {&__pyx_n_s_permuted, __pyx_k_permuted, sizeof(__pyx_k_permuted), 0, 0, 1, 1},
-  {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
-  {&__pyx_n_s_pyx_releasebuffer, __pyx_k_pyx_releasebuffer, sizeof(__pyx_k_pyx_releasebuffer), 0, 0, 1, 1},
   {&__pyx_n_s_random, __pyx_k_random, sizeof(__pyx_k_random), 0, 0, 1, 1},
   {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
   {&__pyx_n_s_result, __pyx_k_result, sizeof(__pyx_k_result), 0, 0, 1, 1},
@@ -3771,8 +3751,8 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
 };
 static int __Pyx_InitCachedBuiltins(void) {
   __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   return 0;
   __pyx_L1_error:;
   return -1;
@@ -3782,69 +3762,69 @@ static int __Pyx_InitCachedConstants(void) {
   __Pyx_RefNannyDeclarations
   __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":215
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
  *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not C contiguous")             # <<<<<<<<<<<<<<
  * 
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  */
-  __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple_);
   __Pyx_GIVEREF(__pyx_tuple_);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":219
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
  *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
  *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
  *                 raise ValueError(u"ndarray is not Fortran contiguous")             # <<<<<<<<<<<<<<
  * 
  *             info.buf = PyArray_DATA(self)
  */
-  __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__2);
   __Pyx_GIVEREF(__pyx_tuple__2);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260
  *                 if ((descr.byteorder == c'>' and little_endian) or
  *                     (descr.byteorder == c'<' and not little_endian)):
  *                     raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *                 if   t == NPY_BYTE:        f = "b"
  *                 elif t == NPY_UBYTE:       f = "B"
  */
-  __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__3);
   __Pyx_GIVEREF(__pyx_tuple__3);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
  * 
  *         if (end - f) - <int>(new_offset - offset[0]) < 15:
  *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")             # <<<<<<<<<<<<<<
  * 
  *         if ((child.byteorder == c'>' and little_endian) or
  */
-  __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__4);
   __Pyx_GIVEREF(__pyx_tuple__4);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":806
  *         if ((child.byteorder == c'>' and little_endian) or
  *             (child.byteorder == c'<' and not little_endian)):
  *             raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
  *             # One could encode it in the format string and have Cython
  *             # complain instead, BUT: < and > in format strings also imply
  */
-  __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__5);
   __Pyx_GIVEREF(__pyx_tuple__5);
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826
  *             t = child.type_num
  *             if end - f < 5:
  *                 raise RuntimeError(u"Format string allocated too short.")             # <<<<<<<<<<<<<<
  * 
  *             # Until ticket #99 is fixed, use integers to avoid warnings
  */
-  __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__6);
   __Pyx_GIVEREF(__pyx_tuple__6);
 
@@ -3858,7 +3838,7 @@ static int __Pyx_InitCachedConstants(void) {
   __pyx_tuple__7 = PyTuple_Pack(11, __pyx_n_s_counts, __pyx_n_s_n, __pyx_n_s_counts_sum, __pyx_n_s_result, __pyx_n_s_permuted, __pyx_n_s_unpacked, __pyx_n_s_cnt, __pyx_n_s_unpacked_idx, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_idx); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__7);
   __Pyx_GIVEREF(__pyx_tuple__7);
-  __pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(3, 0, 11, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Users_jairideout_dev_scikit_bio, __pyx_n_s_subsample_counts_without_replac, 15, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(3, 0, 11, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_evan_biocore_scikit_bio_sk, __pyx_n_s_subsample_counts_without_replac, 15, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_RefNannyFinishContext();
   return 0;
   __pyx_L1_error:;
@@ -3917,25 +3897,25 @@ PyMODINIT_FUNC PyInit___subsample(void)
   #endif
   /*--- Module creation code ---*/
   #if PY_MAJOR_VERSION < 3
-  __pyx_m = Py_InitModule4(__Pyx_NAMESTR("__subsample"), __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
+  __pyx_m = Py_InitModule4("__subsample", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
   #else
   __pyx_m = PyModule_Create(&__pyx_moduledef);
   #endif
   if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   Py_INCREF(__pyx_d);
-  __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #if CYTHON_COMPILING_IN_PYPY
   Py_INCREF(__pyx_b);
   #endif
-  if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+  if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
   /*--- Initialize various global constants etc. ---*/
   if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
   if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #endif
   if (__pyx_module_is_main_skbio__stats____subsample) {
-    if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+    if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
   }
   #if PY_MAJOR_VERSION >= 3
   {
@@ -3962,10 +3942,10 @@ PyMODINIT_FUNC PyInit___subsample(void)
   #endif
   0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 864; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   /*--- Variable import code ---*/
   /*--- Function import code ---*/
   /*--- Execution code ---*/
@@ -4004,18 +3984,23 @@ PyMODINIT_FUNC PyInit___subsample(void)
   if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-  /* "/Users/jairideout/.virtualenvs/scikit-bio/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
+  /* "../../.virtualenvs/skbio/local/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":979
  *      arr.base = baseptr
  * 
  * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
  *     if arr.base is NULL:
  *         return None
  */
+
+  /*--- Wrapped vars code ---*/
+
   goto __pyx_L0;
   __pyx_L1_error:;
   __Pyx_XDECREF(__pyx_t_1);
   if (__pyx_m) {
-    __Pyx_AddTraceback("init skbio.stats.__subsample", __pyx_clineno, __pyx_lineno, __pyx_filename);
+    if (__pyx_d) {
+      __Pyx_AddTraceback("init skbio.stats.__subsample", __pyx_clineno, __pyx_lineno, __pyx_filename);
+    }
     Py_DECREF(__pyx_m); __pyx_m = 0;
   } else if (!PyErr_Occurred()) {
     PyErr_SetString(PyExc_ImportError, "init skbio.stats.__subsample");
@@ -4029,7 +4014,7 @@ PyMODINIT_FUNC PyInit___subsample(void)
   #endif
 }
 
-/* Runtime support code */
+/* --- Runtime support code --- */
 #if CYTHON_REFNANNY
 static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
     PyObject *m = NULL, *p = NULL;
@@ -4044,7 +4029,7 @@ end:
     Py_XDECREF(m);
     return (__Pyx_RefNannyAPIStruct *)r;
 }
-#endif /* CYTHON_REFNANNY */
+#endif
 
 static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
     PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
@@ -4272,7 +4257,7 @@ static int __Pyx_BufFmt_ParseNumber(const char** ts) {
 }
 static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
     int number = __Pyx_BufFmt_ParseNumber(ts);
-    if (number == -1) /* First char was not a digit */
+    if (number == -1)
         PyErr_Format(PyExc_ValueError,\
                      "Does not understand character buffer dtype format string ('%c')", **ts);
     return number;
@@ -4517,7 +4502,7 @@ static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
     ctx->fmt_offset += size;
     if (arraysize)
       ctx->fmt_offset += (arraysize - 1) * size;
-    --ctx->enc_count; /* Consume from buffer string */
+    --ctx->enc_count;
     while (1) {
       if (field == &ctx->root) {
         ctx->head = NULL;
@@ -4525,7 +4510,7 @@ static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
           __Pyx_BufFmt_RaiseExpected(ctx);
           return -1;
         }
-        break; /* breaks both loops as ctx->enc_count == 0 */
+        break;
       }
       ctx->head->field = ++field;
       if (field->type == NULL) {
@@ -4534,7 +4519,7 @@ static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
         continue;
       } else if (field->type->typegroup == 'S') {
         size_t parent_offset = ctx->head->parent_offset + field->offset;
-        if (field->type->fields->type == NULL) continue; /* empty struct */
+        if (field->type->fields->type == NULL) continue;
         field = field->type->fields;
         ++ctx->head;
         ctx->head->field = field;
@@ -4566,7 +4551,7 @@ __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
     while (*ts && *ts != ')') {
         switch (*ts) {
             case ' ': case '\f': case '\r': case '\n': case '\t': case '\v':  continue;
-            default:  break;  /* not a 'break' in the loop */
+            default:  break;
         }
         number = __Pyx_BufFmt_ExpectNumber(&ts);
         if (number == -1) return NULL;
@@ -4635,7 +4620,7 @@ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const cha
       case '^':
         ctx->new_packmode = *ts++;
         break;
-      case 'T': /* substruct */
+      case 'T':
         {
           const char* ts_after_sub;
           size_t i, struct_count = ctx->new_count;
@@ -4647,7 +4632,7 @@ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const cha
             return NULL;
           }
           if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
-          ctx->enc_type = 0; /* Erase processed last struct element */
+          ctx->enc_type = 0;
           ctx->enc_count = 0;
           ctx->struct_alignment = 0;
           ++ts;
@@ -4660,12 +4645,12 @@ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const cha
           if (struct_alignment) ctx->struct_alignment = struct_alignment;
         }
         break;
-      case '}': /* end of substruct; either repeat or move on */
+      case '}':
         {
           size_t alignment = ctx->struct_alignment;
           ++ts;
           if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
-          ctx->enc_type = 0; /* Erase processed last struct element */
+          ctx->enc_type = 0;
           if (alignment && ctx->fmt_offset % alignment) {
             ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
           }
@@ -4777,7 +4762,7 @@ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
     PyObject *result;
 #if CYTHON_COMPILING_IN_CPYTHON
     result = PyDict_GetItem(__pyx_d, name);
-    if (result) {
+    if (likely(result)) {
         Py_INCREF(result);
     } else {
 #else
@@ -4796,14 +4781,10 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg
     ternaryfunc call = func->ob_type->tp_call;
     if (unlikely(!call))
         return PyObject_Call(func, arg, kw);
-#if PY_VERSION_HEX >= 0x02060000
     if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
         return NULL;
-#endif
     result = (*call)(func, arg, kw);
-#if PY_VERSION_HEX >= 0x02060000
     Py_LeaveRecursiveCall();
-#endif
     if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
         PyErr_SetString(
             PyExc_SystemError,
@@ -4835,6 +4816,55 @@ static void __Pyx_RaiseBufferIndexError(int axis) {
      "Out of bounds on buffer access (axis %d)", axis);
 }
 
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
+    PyObject *self, *result;
+    PyCFunction cfunc;
+    cfunc = PyCFunction_GET_FUNCTION(func);
+    self = PyCFunction_GET_SELF(func);
+    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
+        return NULL;
+    result = cfunc(self, arg);
+    Py_LeaveRecursiveCall();
+    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
+        PyErr_SetString(
+            PyExc_SystemError,
+            "NULL result without error in PyObject_Call");
+    }
+    return result;
+}
+#endif
+
+#if CYTHON_COMPILING_IN_CPYTHON
+static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+    PyObject *result;
+    PyObject *args = PyTuple_New(1);
+    if (unlikely(!args)) return NULL;
+    Py_INCREF(arg);
+    PyTuple_SET_ITEM(args, 0, arg);
+    result = __Pyx_PyObject_Call(func, args, NULL);
+    Py_DECREF(args);
+    return result;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+#ifdef __Pyx_CyFunction_USED
+    if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) {
+#else
+    if (likely(PyCFunction_Check(func))) {
+#endif
+        if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
+            return __Pyx_PyObject_CallMethO(func, arg);
+        }
+    }
+    return __Pyx__PyObject_CallOneArg(func, arg);
+}
+#else
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+    PyObject* args = PyTuple_Pack(1, arg);
+    return (likely(args)) ? __Pyx_PyObject_Call(func, args, NULL) : NULL;
+}
+#endif
+
 static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
         PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
         PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice,
@@ -4981,11 +5011,7 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
             goto raise_error;
         }
     }
-    #if PY_VERSION_HEX < 0x02050000
-    if (PyClass_Check(type)) {
-    #else
     if (PyType_Check(type)) {
-    #endif
 #if CYTHON_COMPILING_IN_PYPY
         if (!value) {
             Py_INCREF(Py_None);
@@ -5000,17 +5026,6 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
             goto raise_error;
         }
         value = type;
-        #if PY_VERSION_HEX < 0x02050000
-        if (PyInstance_Check(type)) {
-            type = (PyObject*) ((PyInstanceObject*)type)->in_class;
-            Py_INCREF(type);
-        } else {
-            type = 0;
-            PyErr_SetString(PyExc_TypeError,
-                "raise: exception must be an old-style class or instance");
-            goto raise_error;
-        }
-        #else
         type = (PyObject*) Py_TYPE(type);
         Py_INCREF(type);
         if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
@@ -5018,7 +5033,6 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
                 "raise: exception class must be a subclass of BaseException");
             goto raise_error;
         }
-        #endif
     }
     __Pyx_ErrRestore(type, value, tb);
     return;
@@ -5028,7 +5042,7 @@ raise_error:
     Py_XDECREF(tb);
     return;
 }
-#else /* Python 3+ */
+#else
 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
     PyObject* owned_instance = NULL;
     if (tb == Py_None) {
@@ -5114,6 +5128,13 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject
     }
     PyErr_SetObject(type, value);
     if (tb) {
+#if CYTHON_COMPILING_IN_PYPY
+        PyObject *tmp_type, *tmp_value, *tmp_tb;
+        PyErr_Fetch(tmp_type, tmp_value, tmp_tb);
+        Py_INCREF(tb);
+        PyErr_Restore(tmp_type, tmp_value, tb);
+        Py_XDECREF(tmp_tb);
+#else
         PyThreadState *tstate = PyThreadState_GET();
         PyObject* tmp_tb = tstate->curexc_traceback;
         if (tb != tmp_tb) {
@@ -5121,6 +5142,7 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject
             tstate->curexc_traceback = tb;
             Py_XDECREF(tmp_tb);
         }
+#endif
     }
 bad:
     Py_XDECREF(owned_instance);
@@ -5143,69 +5165,184 @@ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
     PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
 }
 
-#if PY_MAJOR_VERSION < 3
-static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
-  #if PY_VERSION_HEX >= 0x02060000
-    if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
-  #endif
-        if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags);
-  #if PY_VERSION_HEX < 0x02060000
-    if (obj->ob_type->tp_dict) {
-        PyObject *getbuffer_cobj = PyObject_GetItem(
-            obj->ob_type->tp_dict, __pyx_n_s_pyx_getbuffer);
-        if (getbuffer_cobj) {
-            getbufferproc func = (getbufferproc) PyCObject_AsVoidPtr(getbuffer_cobj);
-            Py_DECREF(getbuffer_cobj);
-            if (!func)
-                goto fail;
-            return func(obj, view, flags);
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
+    int start = 0, mid = 0, end = count - 1;
+    if (end >= 0 && code_line > entries[end].code_line) {
+        return count;
+    }
+    while (start < end) {
+        mid = (start + end) / 2;
+        if (code_line < entries[mid].code_line) {
+            end = mid;
+        } else if (code_line > entries[mid].code_line) {
+             start = mid + 1;
         } else {
-            PyErr_Clear();
+            return mid;
         }
     }
-  #endif
-    PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
-#if PY_VERSION_HEX < 0x02060000
-fail:
-#endif
-    return -1;
+    if (code_line <= entries[mid].code_line) {
+        return mid;
+    } else {
+        return mid + 1;
+    }
 }
-static void __Pyx_ReleaseBuffer(Py_buffer *view) {
-    PyObject *obj = view->obj;
+static PyCodeObject *__pyx_find_code_object(int code_line) {
+    PyCodeObject* code_object;
+    int pos;
+    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
+        return NULL;
+    }
+    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+    if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
+        return NULL;
+    }
+    code_object = __pyx_code_cache.entries[pos].code_object;
+    Py_INCREF(code_object);
+    return code_object;
+}
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
+    int pos, i;
+    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
+    if (unlikely(!code_line)) {
+        return;
+    }
+    if (unlikely(!entries)) {
+        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
+        if (likely(entries)) {
+            __pyx_code_cache.entries = entries;
+            __pyx_code_cache.max_count = 64;
+            __pyx_code_cache.count = 1;
+            entries[0].code_line = code_line;
+            entries[0].code_object = code_object;
+            Py_INCREF(code_object);
+        }
+        return;
+    }
+    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
+        PyCodeObject* tmp = entries[pos].code_object;
+        entries[pos].code_object = code_object;
+        Py_DECREF(tmp);
+        return;
+    }
+    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
+        int new_max = __pyx_code_cache.max_count + 64;
+        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
+            __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
+        if (unlikely(!entries)) {
+            return;
+        }
+        __pyx_code_cache.entries = entries;
+        __pyx_code_cache.max_count = new_max;
+    }
+    for (i=__pyx_code_cache.count; i>pos; i--) {
+        entries[i] = entries[i-1];
+    }
+    entries[pos].code_line = code_line;
+    entries[pos].code_object = code_object;
+    __pyx_code_cache.count++;
+    Py_INCREF(code_object);
+}
+
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
+            const char *funcname, int c_line,
+            int py_line, const char *filename) {
+    PyCodeObject *py_code = 0;
+    PyObject *py_srcfile = 0;
+    PyObject *py_funcname = 0;
+    #if PY_MAJOR_VERSION < 3
+    py_srcfile = PyString_FromString(filename);
+    #else
+    py_srcfile = PyUnicode_FromString(filename);
+    #endif
+    if (!py_srcfile) goto bad;
+    if (c_line) {
+        #if PY_MAJOR_VERSION < 3
+        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+        #else
+        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+        #endif
+    }
+    else {
+        #if PY_MAJOR_VERSION < 3
+        py_funcname = PyString_FromString(funcname);
+        #else
+        py_funcname = PyUnicode_FromString(funcname);
+        #endif
+    }
+    if (!py_funcname) goto bad;
+    py_code = __Pyx_PyCode_New(
+        0,
+        0,
+        0,
+        0,
+        0,
+        __pyx_empty_bytes, /*PyObject *code,*/
+        __pyx_empty_tuple, /*PyObject *consts,*/
+        __pyx_empty_tuple, /*PyObject *names,*/
+        __pyx_empty_tuple, /*PyObject *varnames,*/
+        __pyx_empty_tuple, /*PyObject *freevars,*/
+        __pyx_empty_tuple, /*PyObject *cellvars,*/
+        py_srcfile,   /*PyObject *filename,*/
+        py_funcname,  /*PyObject *name,*/
+        py_line,
+        __pyx_empty_bytes  /*PyObject *lnotab*/
+    );
+    Py_DECREF(py_srcfile);
+    Py_DECREF(py_funcname);
+    return py_code;
+bad:
+    Py_XDECREF(py_srcfile);
+    Py_XDECREF(py_funcname);
+    return NULL;
+}
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+                               int py_line, const char *filename) {
+    PyCodeObject *py_code = 0;
+    PyFrameObject *py_frame = 0;
+    py_code = __pyx_find_code_object(c_line ? c_line : py_line);
+    if (!py_code) {
+        py_code = __Pyx_CreateCodeObjectForTraceback(
+            funcname, c_line, py_line, filename);
+        if (!py_code) goto bad;
+        __pyx_insert_code_object(c_line ? c_line : py_line, py_code);
+    }
+    py_frame = PyFrame_New(
+        PyThreadState_GET(), /*PyThreadState *tstate,*/
+        py_code,             /*PyCodeObject *code,*/
+        __pyx_d,      /*PyObject *globals,*/
+        0                    /*PyObject *locals*/
+    );
+    if (!py_frame) goto bad;
+    py_frame->f_lineno = py_line;
+    PyTraceBack_Here(py_frame);
+bad:
+    Py_XDECREF(py_code);
+    Py_XDECREF(py_frame);
+}
+
+#if PY_MAJOR_VERSION < 3
+static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
+    if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
+        if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags);
+    PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
+    return -1;
+}
+static void __Pyx_ReleaseBuffer(Py_buffer *view) {
+    PyObject *obj = view->obj;
     if (!obj) return;
-  #if PY_VERSION_HEX >= 0x02060000
     if (PyObject_CheckBuffer(obj)) {
         PyBuffer_Release(view);
         return;
     }
-  #endif
         if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; }
-  #if PY_VERSION_HEX < 0x02060000
-    if (obj->ob_type->tp_dict) {
-        PyObject *releasebuffer_cobj = PyObject_GetItem(
-            obj->ob_type->tp_dict, __pyx_n_s_pyx_releasebuffer);
-        if (releasebuffer_cobj) {
-            releasebufferproc func = (releasebufferproc) PyCObject_AsVoidPtr(releasebuffer_cobj);
-            Py_DECREF(releasebuffer_cobj);
-            if (!func)
-                goto fail;
-            func(obj, view);
-            return;
-        } else {
-            PyErr_Clear();
-        }
-    }
-  #endif
-    goto nofail;
-#if PY_VERSION_HEX < 0x02060000
-fail:
-#endif
-    PyErr_WriteUnraisable(obj);
-nofail:
     Py_DECREF(obj);
     view->obj = NULL;
 }
-#endif /*  PY_MAJOR_VERSION < 3 */
+#endif
 
 
         static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
@@ -5234,7 +5371,6 @@ nofail:
     empty_dict = PyDict_New();
     if (!empty_dict)
         goto bad;
-    #if PY_VERSION_HEX >= 0x02050000
     {
         #if PY_MAJOR_VERSION >= 3
         if (level == -1) {
@@ -5256,7 +5392,7 @@ nofail:
                     PyErr_Clear();
                 }
             }
-            level = 0; /* try absolute import on failure */
+            level = 0;
         }
         #endif
         if (!module) {
@@ -5273,14 +5409,6 @@ nofail:
             #endif
         }
     }
-    #else
-    if (level>0) {
-        PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4.");
-        goto bad;
-    }
-    module = PyObject_CallFunctionObjArgs(py_import,
-        name, global_dict, empty_dict, list, NULL);
-    #endif
 bad:
     #if PY_VERSION_HEX < 0x03030000
     Py_XDECREF(py_import);
@@ -5342,17 +5470,16 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_int64(npy_int64 value) {
     }
 }
 
-#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func)             \
+#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)       \
     {                                                                     \
-        func_type value = func(x);                                        \
+        func_type value = func_value;                                     \
         if (sizeof(target_type) < sizeof(func_type)) {                    \
             if (unlikely(value != (func_type) (target_type) value)) {     \
                 func_type zero = 0;                                       \
-                PyErr_SetString(PyExc_OverflowError,                      \
-                    (is_unsigned && unlikely(value < zero)) ?             \
-                    "can't convert negative value to " #target_type :     \
-                    "value too large to convert to " #target_type);       \
-                return (target_type) -1;                                  \
+                if (is_unsigned && unlikely(value < zero))                \
+                    goto raise_neg_overflow;                              \
+                else                                                      \
+                    goto raise_overflow;                                  \
             }                                                             \
         }                                                                 \
         return (target_type) value;                                       \
@@ -5363,19 +5490,18 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_int64(npy_int64 value) {
   #include "longintrepr.h"
  #endif
 #endif
+
 static CYTHON_INLINE Py_intptr_t __Pyx_PyInt_As_Py_intptr_t(PyObject *x) {
     const Py_intptr_t neg_one = (Py_intptr_t) -1, const_zero = 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
         if (sizeof(Py_intptr_t) < sizeof(long)) {
-            __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, PyInt_AS_LONG)
+            __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, PyInt_AS_LONG(x))
         } else {
             long val = PyInt_AS_LONG(x);
             if (is_unsigned && unlikely(val < 0)) {
-                PyErr_SetString(PyExc_OverflowError,
-                                "can't convert negative value to Py_intptr_t");
-                return (Py_intptr_t) -1;
+                goto raise_neg_overflow;
             }
             return (Py_intptr_t) val;
         }
@@ -5385,40 +5511,34 @@ static CYTHON_INLINE Py_intptr_t __Pyx_PyInt_As_Py_intptr_t(PyObject *x) {
         if (is_unsigned) {
 #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
  #if CYTHON_USE_PYLONG_INTERNALS
-            if (sizeof(digit) <= sizeof(Py_intptr_t)) {
-                switch (Py_SIZE(x)) {
-                    case  0: return 0;
-                    case  1: return (Py_intptr_t) ((PyLongObject*)x)->ob_digit[0];
-                }
+            switch (Py_SIZE(x)) {
+                case  0: return 0;
+                case  1: __PYX_VERIFY_RETURN_INT(Py_intptr_t, digit, ((PyLongObject*)x)->ob_digit[0]);
             }
  #endif
 #endif
             if (unlikely(Py_SIZE(x) < 0)) {
-                PyErr_SetString(PyExc_OverflowError,
-                                "can't convert negative value to Py_intptr_t");
-                return (Py_intptr_t) -1;
+                goto raise_neg_overflow;
             }
             if (sizeof(Py_intptr_t) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, PyLong_AsUnsignedLong)
+                __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, PyLong_AsUnsignedLong(x))
             } else if (sizeof(Py_intptr_t) <= sizeof(unsigned long long)) {
-                __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long long, PyLong_AsUnsignedLongLong)
+                __PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long long, PyLong_AsUnsignedLongLong(x))
             }
         } else {
 #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
  #if CYTHON_USE_PYLONG_INTERNALS
-            if (sizeof(digit) <= sizeof(Py_intptr_t)) {
-                switch (Py_SIZE(x)) {
-                    case  0: return 0;
-                    case  1: return +(Py_intptr_t) ((PyLongObject*)x)->ob_digit[0];
-                    case -1: return -(Py_intptr_t) ((PyLongObject*)x)->ob_digit[0];
-                }
+            switch (Py_SIZE(x)) {
+                case  0: return 0;
+                case  1: __PYX_VERIFY_RETURN_INT(Py_intptr_t,  digit, +(((PyLongObject*)x)->ob_digit[0]));
+                case -1: __PYX_VERIFY_RETURN_INT(Py_intptr_t, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
             }
  #endif
 #endif
             if (sizeof(Py_intptr_t) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, PyLong_AsLong)
+                __PYX_VERIFY_RETURN_INT(Py_intptr_t, long, PyLong_AsLong(x))
             } else if (sizeof(Py_intptr_t) <= sizeof(long long)) {
-                __PYX_VERIFY_RETURN_INT(Py_intptr_t, long long, PyLong_AsLongLong)
+                __PYX_VERIFY_RETURN_INT(Py_intptr_t, long long, PyLong_AsLongLong(x))
             }
         }
         {
@@ -5456,6 +5576,14 @@ static CYTHON_INLINE Py_intptr_t __Pyx_PyInt_As_Py_intptr_t(PyObject *x) {
         Py_DECREF(tmp);
         return val;
     }
+raise_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "value too large to convert to Py_intptr_t");
+    return (Py_intptr_t) -1;
+raise_neg_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "can't convert negative value to Py_intptr_t");
+    return (Py_intptr_t) -1;
 }
 
 #if CYTHON_CCOMPLEX
@@ -5724,24 +5852,17 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
     }
 }
 
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
-  #include "longintrepr.h"
- #endif
-#endif
 static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
     const int neg_one = (int) -1, const_zero = 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
         if (sizeof(int) < sizeof(long)) {
-            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG)
+            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
         } else {
             long val = PyInt_AS_LONG(x);
             if (is_unsigned && unlikely(val < 0)) {
-                PyErr_SetString(PyExc_OverflowError,
-                                "can't convert negative value to int");
-                return (int) -1;
+                goto raise_neg_overflow;
             }
             return (int) val;
         }
@@ -5751,40 +5872,34 @@ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
         if (is_unsigned) {
 #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
  #if CYTHON_USE_PYLONG_INTERNALS
-            if (sizeof(digit) <= sizeof(int)) {
-                switch (Py_SIZE(x)) {
-                    case  0: return 0;
-                    case  1: return (int) ((PyLongObject*)x)->ob_digit[0];
-                }
+            switch (Py_SIZE(x)) {
+                case  0: return 0;
+                case  1: __PYX_VERIFY_RETURN_INT(int, digit, ((PyLongObject*)x)->ob_digit[0]);
             }
  #endif
 #endif
             if (unlikely(Py_SIZE(x) < 0)) {
-                PyErr_SetString(PyExc_OverflowError,
-                                "can't convert negative value to int");
-                return (int) -1;
+                goto raise_neg_overflow;
             }
             if (sizeof(int) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong)
+                __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong(x))
             } else if (sizeof(int) <= sizeof(unsigned long long)) {
-                __PYX_VERIFY_RETURN_INT(int, unsigned long long, PyLong_AsUnsignedLongLong)
+                __PYX_VERIFY_RETURN_INT(int, unsigned long long, PyLong_AsUnsignedLongLong(x))
             }
         } else {
 #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
  #if CYTHON_USE_PYLONG_INTERNALS
-            if (sizeof(digit) <= sizeof(int)) {
-                switch (Py_SIZE(x)) {
-                    case  0: return 0;
-                    case  1: return +(int) ((PyLongObject*)x)->ob_digit[0];
-                    case -1: return -(int) ((PyLongObject*)x)->ob_digit[0];
-                }
+            switch (Py_SIZE(x)) {
+                case  0: return 0;
+                case  1: __PYX_VERIFY_RETURN_INT(int,  digit, +(((PyLongObject*)x)->ob_digit[0]));
+                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
             }
  #endif
 #endif
             if (sizeof(int) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong)
+                __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong(x))
             } else if (sizeof(int) <= sizeof(long long)) {
-                __PYX_VERIFY_RETURN_INT(int, long long, PyLong_AsLongLong)
+                __PYX_VERIFY_RETURN_INT(int, long long, PyLong_AsLongLong(x))
             }
         }
         {
@@ -5822,6 +5937,14 @@ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
         Py_DECREF(tmp);
         return val;
     }
+raise_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "value too large to convert to int");
+    return (int) -1;
+raise_neg_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "can't convert negative value to int");
+    return (int) -1;
 }
 
 static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
@@ -5850,24 +5973,17 @@ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
     }
 }
 
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
-  #include "longintrepr.h"
- #endif
-#endif
 static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
     const long neg_one = (long) -1, const_zero = 0;
     const int is_unsigned = neg_one > const_zero;
 #if PY_MAJOR_VERSION < 3
     if (likely(PyInt_Check(x))) {
         if (sizeof(long) < sizeof(long)) {
-            __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG)
+            __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
         } else {
             long val = PyInt_AS_LONG(x);
             if (is_unsigned && unlikely(val < 0)) {
-                PyErr_SetString(PyExc_OverflowError,
-                                "can't convert negative value to long");
-                return (long) -1;
+                goto raise_neg_overflow;
             }
             return (long) val;
         }
@@ -5877,40 +5993,34 @@ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
         if (is_unsigned) {
 #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
  #if CYTHON_USE_PYLONG_INTERNALS
-            if (sizeof(digit) <= sizeof(long)) {
-                switch (Py_SIZE(x)) {
-                    case  0: return 0;
-                    case  1: return (long) ((PyLongObject*)x)->ob_digit[0];
-                }
+            switch (Py_SIZE(x)) {
+                case  0: return 0;
+                case  1: __PYX_VERIFY_RETURN_INT(long, digit, ((PyLongObject*)x)->ob_digit[0]);
             }
  #endif
 #endif
             if (unlikely(Py_SIZE(x) < 0)) {
-                PyErr_SetString(PyExc_OverflowError,
-                                "can't convert negative value to long");
-                return (long) -1;
+                goto raise_neg_overflow;
             }
             if (sizeof(long) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong)
+                __PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong(x))
             } else if (sizeof(long) <= sizeof(unsigned long long)) {
-                __PYX_VERIFY_RETURN_INT(long, unsigned long long, PyLong_AsUnsignedLongLong)
+                __PYX_VERIFY_RETURN_INT(long, unsigned long long, PyLong_AsUnsignedLongLong(x))
             }
         } else {
 #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
  #if CYTHON_USE_PYLONG_INTERNALS
-            if (sizeof(digit) <= sizeof(long)) {
-                switch (Py_SIZE(x)) {
-                    case  0: return 0;
-                    case  1: return +(long) ((PyLongObject*)x)->ob_digit[0];
-                    case -1: return -(long) ((PyLongObject*)x)->ob_digit[0];
-                }
+            switch (Py_SIZE(x)) {
+                case  0: return 0;
+                case  1: __PYX_VERIFY_RETURN_INT(long,  digit, +(((PyLongObject*)x)->ob_digit[0]));
+                case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
             }
  #endif
 #endif
             if (sizeof(long) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong)
+                __PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong(x))
             } else if (sizeof(long) <= sizeof(long long)) {
-                __PYX_VERIFY_RETURN_INT(long, long long, PyLong_AsLongLong)
+                __PYX_VERIFY_RETURN_INT(long, long long, PyLong_AsLongLong(x))
             }
         }
         {
@@ -5948,6 +6058,14 @@ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
         Py_DECREF(tmp);
         return val;
     }
+raise_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "value too large to convert to long");
+    return (long) -1;
+raise_neg_overflow:
+    PyErr_SetString(PyExc_OverflowError,
+        "can't convert negative value to long");
+    return (long) -1;
 }
 
 static int __Pyx_check_binary_version(void) {
@@ -5960,11 +6078,7 @@ static int __Pyx_check_binary_version(void) {
                       "compiletime version %s of module '%.100s' "
                       "does not match runtime version %s",
                       ctversion, __Pyx_MODULE_NAME, rtversion);
-        #if PY_VERSION_HEX < 0x02050000
-        return PyErr_Warn(NULL, message);
-        #else
         return PyErr_WarnEx(NULL, message, 1);
-        #endif
     }
     return 0;
 }
@@ -6034,11 +6148,7 @@ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class
         PyOS_snprintf(warning, sizeof(warning),
             "%s.%s size changed, may indicate binary incompatibility",
             module_name, class_name);
-        #if PY_VERSION_HEX < 0x02050000
-        if (PyErr_Warn(NULL, warning) < 0) goto bad;
-        #else
         if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
-        #endif
     }
     else if ((size_t)basicsize != size) {
         PyErr_Format(PyExc_ValueError,
@@ -6054,168 +6164,6 @@ bad:
 }
 #endif
 
-static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
-    int start = 0, mid = 0, end = count - 1;
-    if (end >= 0 && code_line > entries[end].code_line) {
-        return count;
-    }
-    while (start < end) {
-        mid = (start + end) / 2;
-        if (code_line < entries[mid].code_line) {
-            end = mid;
-        } else if (code_line > entries[mid].code_line) {
-             start = mid + 1;
-        } else {
-            return mid;
-        }
-    }
-    if (code_line <= entries[mid].code_line) {
-        return mid;
-    } else {
-        return mid + 1;
-    }
-}
-static PyCodeObject *__pyx_find_code_object(int code_line) {
-    PyCodeObject* code_object;
-    int pos;
-    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
-        return NULL;
-    }
-    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
-    if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
-        return NULL;
-    }
-    code_object = __pyx_code_cache.entries[pos].code_object;
-    Py_INCREF(code_object);
-    return code_object;
-}
-static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
-    int pos, i;
-    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
-    if (unlikely(!code_line)) {
-        return;
-    }
-    if (unlikely(!entries)) {
-        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
-        if (likely(entries)) {
-            __pyx_code_cache.entries = entries;
-            __pyx_code_cache.max_count = 64;
-            __pyx_code_cache.count = 1;
-            entries[0].code_line = code_line;
-            entries[0].code_object = code_object;
-            Py_INCREF(code_object);
-        }
-        return;
-    }
-    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
-    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
-        PyCodeObject* tmp = entries[pos].code_object;
-        entries[pos].code_object = code_object;
-        Py_DECREF(tmp);
-        return;
-    }
-    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
-        int new_max = __pyx_code_cache.max_count + 64;
-        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
-            __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
-        if (unlikely(!entries)) {
-            return;
-        }
-        __pyx_code_cache.entries = entries;
-        __pyx_code_cache.max_count = new_max;
-    }
-    for (i=__pyx_code_cache.count; i>pos; i--) {
-        entries[i] = entries[i-1];
-    }
-    entries[pos].code_line = code_line;
-    entries[pos].code_object = code_object;
-    __pyx_code_cache.count++;
-    Py_INCREF(code_object);
-}
-
-#include "compile.h"
-#include "frameobject.h"
-#include "traceback.h"
-static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
-            const char *funcname, int c_line,
-            int py_line, const char *filename) {
-    PyCodeObject *py_code = 0;
-    PyObject *py_srcfile = 0;
-    PyObject *py_funcname = 0;
-    #if PY_MAJOR_VERSION < 3
-    py_srcfile = PyString_FromString(filename);
-    #else
-    py_srcfile = PyUnicode_FromString(filename);
-    #endif
-    if (!py_srcfile) goto bad;
-    if (c_line) {
-        #if PY_MAJOR_VERSION < 3
-        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
-        #else
-        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
-        #endif
-    }
-    else {
-        #if PY_MAJOR_VERSION < 3
-        py_funcname = PyString_FromString(funcname);
-        #else
-        py_funcname = PyUnicode_FromString(funcname);
-        #endif
-    }
-    if (!py_funcname) goto bad;
-    py_code = __Pyx_PyCode_New(
-        0,            /*int argcount,*/
-        0,            /*int kwonlyargcount,*/
-        0,            /*int nlocals,*/
-        0,            /*int stacksize,*/
-        0,            /*int flags,*/
-        __pyx_empty_bytes, /*PyObject *code,*/
-        __pyx_empty_tuple, /*PyObject *consts,*/
-        __pyx_empty_tuple, /*PyObject *names,*/
-        __pyx_empty_tuple, /*PyObject *varnames,*/
-        __pyx_empty_tuple, /*PyObject *freevars,*/
-        __pyx_empty_tuple, /*PyObject *cellvars,*/
-        py_srcfile,   /*PyObject *filename,*/
-        py_funcname,  /*PyObject *name,*/
-        py_line,      /*int firstlineno,*/
-        __pyx_empty_bytes  /*PyObject *lnotab*/
-    );
-    Py_DECREF(py_srcfile);
-    Py_DECREF(py_funcname);
-    return py_code;
-bad:
-    Py_XDECREF(py_srcfile);
-    Py_XDECREF(py_funcname);
-    return NULL;
-}
-static void __Pyx_AddTraceback(const char *funcname, int c_line,
-                               int py_line, const char *filename) {
-    PyCodeObject *py_code = 0;
-    PyObject *py_globals = 0;
-    PyFrameObject *py_frame = 0;
-    py_code = __pyx_find_code_object(c_line ? c_line : py_line);
-    if (!py_code) {
-        py_code = __Pyx_CreateCodeObjectForTraceback(
-            funcname, c_line, py_line, filename);
-        if (!py_code) goto bad;
-        __pyx_insert_code_object(c_line ? c_line : py_line, py_code);
-    }
-    py_globals = PyModule_GetDict(__pyx_m);
-    if (!py_globals) goto bad;
-    py_frame = PyFrame_New(
-        PyThreadState_GET(), /*PyThreadState *tstate,*/
-        py_code,             /*PyCodeObject *code,*/
-        py_globals,          /*PyObject *globals,*/
-        0                    /*PyObject *locals*/
-    );
-    if (!py_frame) goto bad;
-    py_frame->f_lineno = py_line;
-    PyTraceBack_Here(py_frame);
-bad:
-    Py_XDECREF(py_code);
-    Py_XDECREF(py_frame);
-}
-
 static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
     while (t->p) {
         #if PY_MAJOR_VERSION < 3
@@ -6226,7 +6174,7 @@ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
         } else {
             *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
         }
-        #else  /* Python 3+ has unicode identifiers */
+        #else
         if (t->is_unicode | t->is_str) {
             if (t->intern) {
                 *t->p = PyUnicode_InternFromString(t->s);
@@ -6276,11 +6224,11 @@ static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_
                 }
             }
         }
-#endif /*__PYX_DEFAULT_STRING_ENCODING_IS_ASCII*/
+#endif
         *length = PyBytes_GET_SIZE(defenc);
         return defenc_c;
-#else /* PY_VERSION_HEX < 0x03030000 */
-        if (PyUnicode_READY(o) == -1) return NULL;
+#else
+        if (__Pyx_PyUnicode_READY(o) == -1) return NULL;
 #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
         if (PyUnicode_IS_ASCII(o)) {
             *length = PyUnicode_GET_LENGTH(o);
@@ -6289,20 +6237,18 @@ static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_
             PyUnicode_AsASCIIString(o);
             return NULL;
         }
-#else /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */
+#else
         return PyUnicode_AsUTF8AndSize(o, length);
-#endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */
-#endif /* PY_VERSION_HEX < 0x03030000 */
+#endif
+#endif
     } else
-#endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII  || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT */
+#endif
 #if !CYTHON_COMPILING_IN_PYPY
-#if PY_VERSION_HEX >= 0x02060000
     if (PyByteArray_Check(o)) {
         *length = PyByteArray_GET_SIZE(o);
         return PyByteArray_AS_STRING(o);
     } else
 #endif
-#endif
     {
         char* result;
         int r = PyBytes_AsStringAndSize(o, &result, length);
@@ -6363,11 +6309,6 @@ static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
   }
   return res;
 }
-#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
- #if CYTHON_USE_PYLONG_INTERNALS
-  #include "longintrepr.h"
- #endif
-#endif
 static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
   Py_ssize_t ival;
   PyObject *x;
@@ -6385,11 +6326,7 @@ static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
        }
      #endif
     #endif
-  #if PY_VERSION_HEX < 0x02060000
-    return PyInt_AsSsize_t(b);
-  #else
     return PyLong_AsSsize_t(b);
-  #endif
   }
   x = PyNumber_Index(b);
   if (!x) return -1;
@@ -6398,17 +6335,7 @@ static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
   return ival;
 }
 static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
-#if PY_VERSION_HEX < 0x02050000
-   if (ival <= LONG_MAX)
-       return PyInt_FromLong((long)ival);
-   else {
-       unsigned char *bytes = (unsigned char *) &ival;
-       int one = 1; int little = (int)*(unsigned char*)&one;
-       return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0);
-   }
-#else
-   return PyInt_FromSize_t(ival);
-#endif
+    return PyInt_FromSize_t(ival);
 }
 
 
diff --git a/skbio/stats/_misc.py b/skbio/stats/_misc.py
index 14d5684..7ef43b3 100644
--- a/skbio/stats/_misc.py
+++ b/skbio/stats/_misc.py
@@ -8,56 +8,6 @@
 
 from __future__ import absolute_import, division, print_function
 
-import warnings
-
-import numpy as np
-
-
-def p_value_to_str(p_value, permutations):
-    """Format p-value as a string with the correct number of decimals.
-
-    .. note:: Deprecated in scikit-bio 0.2.1-dev
-       ``p_value_to_str`` will be removed in scikit-bio 0.3.0.
-       Permutation-based p-values in scikit-bio are calculated as
-       ``(num_extreme + 1) / (num_permutations + 1)``, so it is impossible to
-       obtain a p-value of zero. This function historically existed for
-       correcting the number of digits displayed when obtaining a p-value of
-       zero. Since this is no longer possible, this functionality will be
-       removed.
-
-    Number of decimals is determined by the number of permutations.
-
-    Parameters
-    ----------
-    p_value : float or None
-        p-value to convert to string.
-    permutations : int
-        Number of permutations used to calculate `p_value`.
-
-    Returns
-    -------
-    str
-        `p_value` formatted as a string with the correct number of decimals. If
-        `p_value` is ``None`` or ``np.nan``, ``'N/A'`` is returned. If
-        `permutations` is less than 10, a message stating insufficient number
-        of permutations is returned.
-    """
-    warnings.warn(
-        "skbio.stats.p_value_to_str is deprecated and will be removed in "
-        "scikit-bio 0.3.0. There are no plans to provide a replacement for "
-        "this functionality.", DeprecationWarning)
-
-    if p_value is None or np.isnan(p_value):
-        result = 'N/A'
-    elif permutations < 10:
-        result = ('Too few permutations to compute p-value (permutations '
-                  '= %d)' % permutations)
-    else:
-        decimal_places = int(np.log10(permutations + 1))
-        result = ('%1.' + '%df' % decimal_places) % p_value
-
-    return result
-
 
 def _pprint_strs(strs, max_chars=80, delimiter=', ', suffix='...',):
     """Pretty-print an iterable of strings, truncating if necessary."""
diff --git a/skbio/stats/_subsample.py b/skbio/stats/_subsample.py
index e2190a7..ba27f4c 100644
--- a/skbio/stats/_subsample.py
+++ b/skbio/stats/_subsample.py
@@ -22,8 +22,10 @@ try:
     from .__subsample import _subsample_counts_without_replacement
 except ImportError:
     pass
+from skbio.util._decorator import experimental
 
 
+ at experimental(as_of="0.4.0")
 def isubsample(items, maximum, minimum=1, buf_size=1000, bin_f=None):
     """Randomly subsample items from bins, without replacement.
 
@@ -153,65 +155,7 @@ def isubsample(items, maximum, minimum=1, buf_size=1000, bin_f=None):
             yield (bin_, item)
 
 
-def subsample(counts, n, replace=False):
-    """Randomly subsample from a vector of counts, with or without replacement.
-
-    .. note:: Deprecated in scikit-bio 0.2.1-dev
-       ``subsample`` will be removed in scikit-bio 0.3.0. It is replaced by
-       ``subsample_counts``, which provides an identical interface; only the
-       function name has changed.
-
-    Parameters
-    ----------
-    counts : 1-D array_like
-        Vector of counts (integers) to randomly subsample from.
-    n : int
-        Number of items to subsample from `counts`. Must be less than or equal
-        to the sum of `counts`.
-    replace : bool, optional
-        If ``True``, subsample with replacement. If ``False`` (the default),
-        subsample without replacement.
-
-    Returns
-    -------
-    subsampled : ndarray
-        Subsampled vector of counts where the sum of the elements equals `n`
-        (i.e., ``subsampled.sum() == n``). Will have the same shape as
-        `counts`.
-
-    Raises
-    ------
-    TypeError
-        If `counts` cannot be safely converted to an integer datatype.
-    ValueError
-        If `n` is less than zero or greater than the sum of `counts`.
-
-    Raises
-    ------
-    EfficiencyWarning
-        If the accelerated code isn't present or hasn't been compiled.
-
-    See Also
-    --------
-    subsample_counts
-
-    Notes
-    -----
-    If subsampling is performed without replacement (``replace=False``), a copy
-    of `counts` is returned if `n` is equal to the number of items in `counts`,
-    as all items will be chosen from the original vector.
-
-    If subsampling is performed with replacement (``replace=True``) and `n` is
-    equal to the number of items in `counts`, the subsampled vector that is
-    returned may not necessarily be the same vector as `counts`.
-
-    """
-    warn("skbio.stats.subsample is deprecated and will be removed in "
-         "scikit-bio 0.3.0. Please update your code to use "
-         "skbio.stats.subsample_counts.", DeprecationWarning)
-    return subsample_counts(counts, n, replace=replace)
-
-
+ at experimental(as_of="0.4.0")
 def subsample_counts(counts, n, replace=False):
     """Randomly subsample from a vector of counts, with or without replacement.
 
diff --git a/skbio/stats/composition.py b/skbio/stats/composition.py
new file mode 100644
index 0000000..e0b6c67
--- /dev/null
+++ b/skbio/stats/composition.py
@@ -0,0 +1,389 @@
+r"""
+Composition Statistics (:mod:`skbio.stats.composition`)
+=======================================================
+
+.. currentmodule:: skbio.stats.composition
+
+This module provides functions for compositional data analysis.
+
+Many 'omics datasets are inherently compositional - meaning that they
+are best interpreted as proportions or percentages rather than
+absolute counts.
+
+Formally, :math:`x` is a composition if :math:`\sum_{i=0}^D x_{i} = c`
+and :math:`x_{i} > 0`, :math:`1 \leq i \leq D` and :math:`c` is a real
+valued constant and there are :math:`D` components for each
+composition. In this module :math:`c=1`. Compositional data can be
+analyzed using Aitchison geometry. [1]_
+
+However, in this framework, standard real Euclidean operations such as
+addition and multiplication no longer apply. Only operations such as
+perturbation and power can be used to manipulate this data. [1]_
+
+This module allows two styles of manipulation of compositional data.
+Compositional data can be analyzed using perturbation and power
+operations, which can be useful for simulation studies. The
+alternative strategy is to transform compositional data into the real
+space.  Right now, the centre log ratio transform (clr) [1]_ can be
+used to accomplish this.  This transform can be useful for performing
+standard statistical tools such as parametric hypothesis testing,
+regressions and more.
+
+The major caveat of using this framework is dealing with zeros.  In
+the Aitchison geometry, only compositions with nonzero components can
+be considered. The multiplicative replacement technique [2]_ can be
+used to substitute these zeros with small pseudocounts without
+introducing major distortions to the data.
+
+Functions
+---------
+
+.. autosummary::
+   :toctree: generated/
+
+   closure
+   multiplicative_replacement
+   perturb
+   perturb_inv
+   power
+   clr
+   centralize
+
+References
+----------
+.. [1] V. Pawlowsky-Glahn. "Lecture Notes on Compositional Data Analysis"
+.. [2] J. A. Martin-Fernandez. "Dealing With Zeros and Missing Values in
+       Compositional Data Sets Using Nonparametric Imputation"
+
+
+Examples
+--------
+
+>>> import numpy as np
+
+Consider a very simple environment with only 3 species. The species
+in the environment are equally distributed and their proportions are
+equivalent:
+
+>>> otus = np.array([1./3, 1./3., 1./3])
+
+Suppose that an antibiotic kills off half of the population for the
+first two species, but doesn't harm the third species. Then the
+perturbation vector would be as follows
+
+>>> antibiotic = np.array([1./2, 1./2, 1])
+
+And the resulting perturbation would be
+
+>>> perturb(otus, antibiotic)
+array([ 0.25,  0.25,  0.5 ])
+
+"""
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+import numpy as np
+import scipy.stats as ss
+
+from skbio.util._decorator import experimental
+
+
+ at experimental(as_of="0.4.0")
+def closure(mat):
+    """
+    Performs closure to ensure that all elements add up to 1.
+
+    Parameters
+    ----------
+    mat : array_like
+       a matrix of proportions where
+       rows = compositions
+       columns = components
+
+    Returns
+    -------
+    array_like, np.float64
+       A matrix of proportions where all of the values
+       are nonzero and each composition (row) adds up to 1
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from skbio.stats.composition import closure
+    >>> X = np.array([[2, 2, 6], [4, 4, 2]])
+    >>> closure(X)
+    array([[ 0.2,  0.2,  0.6],
+           [ 0.4,  0.4,  0.2]])
+
+    """
+    mat = np.atleast_2d(mat)
+    if np.any(mat < 0):
+        raise ValueError("Cannot have negative proportions")
+    if mat.ndim > 2:
+        raise ValueError("Input matrix can only have two dimensions or less")
+    mat = mat / mat.sum(axis=1, keepdims=True)
+    return mat.squeeze()
+
+
+ at experimental(as_of="0.4.0")
+def multiplicative_replacement(mat, delta=None):
+    r"""Replace all zeros with small non-zero values
+
+    It uses the multiplicative replacement strategy [1]_ ,
+    replacing zeros with a small positive :math:`\delta`
+    and ensuring that the compositions still add up to 1.
+
+
+    Parameters
+    ----------
+    mat: array_like
+       a matrix of proportions where
+       rows = compositions and
+       columns = components
+    delta: float, optional
+       a small number to be used to replace zeros
+       If delta is not specified, then the default delta is
+       :math:`\delta = \frac{1}{N^2}` where :math:`N`
+       is the number of components
+
+    Returns
+    -------
+    numpy.ndarray, np.float64
+       A matrix of proportions where all of the values
+       are nonzero and each composition (row) adds up to 1
+
+    References
+    ----------
+    .. [1] J. A. Martin-Fernandez. "Dealing With Zeros and Missing Values in
+           Compositional Data Sets Using Nonparametric Imputation"
+
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from skbio.stats.composition import multiplicative_replacement
+    >>> X = np.array([[.2,.4,.4, 0],[0,.5,.5,0]])
+    >>> multiplicative_replacement(X)
+    array([[ 0.1875,  0.375 ,  0.375 ,  0.0625],
+           [ 0.0625,  0.4375,  0.4375,  0.0625]])
+
+    """
+    mat = closure(mat)
+    z_mat = (mat == 0)
+
+    num_feats = mat.shape[-1]
+    tot = z_mat.sum(axis=-1, keepdims=True)
+
+    if delta is None:
+        delta = (1. / num_feats)**2
+
+    zcnts = 1 - tot * delta
+    mat = np.where(z_mat, delta, zcnts * mat)
+    return mat.squeeze()
+
+
+ at experimental(as_of="0.4.0")
+def perturb(x, y):
+    r"""
+    Performs the perturbation operation.
+
+    This operation is defined as
+    :math:`x \oplus y = C[x_1 y_1, ..., x_D y_D]`
+
+    :math:`C[x]` is the closure operation defined as
+    :math:`C[x] = [\frac{x_1}{\sum x},...,\frac{x_D}{\sum x}]`
+    for some :math:`D` dimensional real vector :math:`x` and
+    :math:`D` is the number of components for every composition.
+
+    Parameters
+    ----------
+    x : array_like, float
+        a matrix of proportions where
+        rows = compositions and
+        columns = components
+    y : array_like, float
+        a matrix of proportions where
+        rows = compositions and
+        columns = components
+
+    Returns
+    -------
+    numpy.ndarray, np.float64
+       A matrix of proportions where all of the values
+       are nonzero and each composition (row) adds up to 1
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from skbio.stats.composition import perturb
+    >>> x = np.array([.1,.3,.4, .2])
+    >>> y = np.array([1./6,1./6,1./3,1./3])
+    >>> perturb(x,y)
+    array([ 0.0625,  0.1875,  0.5   ,  0.25  ])
+
+    """
+    x, y = closure(x), closure(y)
+    return closure(x * y)
+
+
+ at experimental(as_of="0.4.0")
+def perturb_inv(x, y):
+    r"""
+    Performs the inverse perturbation operation.
+
+    This operation is defined as
+    :math:`x \ominus y = C[x_1 y_1^{-1}, ..., x_D y_D^{-1}]`
+
+    :math:`C[x]` is the closure operation defined as
+    :math:`C[x] = [\frac{x_1}{\sum x},...,\frac{x_D}{\sum x}]`
+    for some :math:`D` dimensional real vector :math:`x` and
+    :math:`D` is the number of components for every composition.
+
+    Parameters
+    ----------
+    x : array_like
+        a matrix of proportions where
+        rows = compositions and
+        columns = components
+    y : array_like
+        a matrix of proportions where
+        rows = compositions and
+        columns = components
+
+    Returns
+    -------
+    numpy.ndarray, np.float64
+       A matrix of proportions where all of the values
+       are nonzero and each composition (row) adds up to 1
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from skbio.stats.composition import perturb_inv
+    >>> x = np.array([.1,.3,.4, .2])
+    >>> y = np.array([1./6,1./6,1./3,1./3])
+    >>> perturb_inv(x,y)
+    array([ 0.14285714,  0.42857143,  0.28571429,  0.14285714])
+
+    """
+    x, y = closure(x), closure(y)
+    return closure(x / y)
+
+
+ at experimental(as_of="0.4.0")
+def power(x, a):
+    r"""
+    Performs the power operation.
+
+    This operation is defined as follows
+    :math:`x \odot a = C[x_1^a, ..., x_D^a]`
+
+    :math:`C[x]` is the closure operation defined as
+    :math:`C[x] = [\frac{x_1}{\sum x},...,\frac{x_D}{\sum x}]`
+    for some :math:`D` dimensional real vector :math:`x` and
+    :math:`D` is the number of components for every composition.
+
+    Parameters
+    ----------
+    x : array_like, float
+        a matrix of proportions where
+        rows = compositions and
+        columns = components
+    a : float
+        a scalar float
+
+    Returns
+    -------
+    numpy.ndarray, np.float64
+       A matrix of proportions where all of the values
+       are nonzero and each composition (row) adds up to 1
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from skbio.stats.composition import power
+    >>> x = np.array([.1,.3,.4, .2])
+    >>> power(x, .1)
+    array([ 0.23059566,  0.25737316,  0.26488486,  0.24714631])
+
+    """
+    x = closure(x)
+    return closure(x**a).squeeze()
+
+
+ at experimental(as_of="0.4.0")
+def clr(mat):
+    r"""
+    Performs centre log ratio transformation.
+
+    This function transforms compositions from Aitchison geometry to
+    the real space. This transformation is an isometry, but not an
+    isomorphism. It is defined for a composition :math:`x` as follows:
+
+    :math:`clr(x) = ln[\frac{x_1}{g_m(x)}, ..., \frac{x_D}{g_m(x)}]`
+    where :math:`g_m(x) = (\prod_{i=1}^{D} x_i)^{1/D}` is the geometric
+    mean of :math:`x`.
+
+    Parameters
+    ----------
+    mat : array_like, float
+       a matrix of proportions where
+       rows = compositions and
+       columns = components
+
+    Returns
+    -------
+    numpy.ndarray
+         clr transformed matrix
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from skbio.stats.composition import clr
+    >>> x = np.array([.1,.3,.4, .2])
+    >>> clr(x)
+    array([-0.79451346,  0.30409883,  0.5917809 , -0.10136628])
+
+    """
+    mat = closure(mat)
+    lmat = np.log(mat)
+    gm = lmat.mean(axis=-1, keepdims=True)
+    return (lmat - gm).squeeze()
+
+
+ at experimental(as_of="0.4.0")
+def centralize(mat):
+    """Center data around its geometric average.
+
+    Parameters
+    ----------
+    mat : array_like, float
+       a matrix of proportions where
+       rows = compositions and
+       columns = components
+
+    Returns
+    -------
+    numpy.ndarray
+         centered composition matrix
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from skbio.stats.composition import centralize
+    >>> X = np.array([[.1,.3,.4, .2],[.2,.2,.2,.4]])
+    >>> centralize(X)
+    array([[ 0.17445763,  0.30216948,  0.34891526,  0.17445763],
+           [ 0.32495488,  0.18761279,  0.16247744,  0.32495488]])
+
+    """
+    mat = closure(mat)
+    cen = ss.gmean(mat, axis=0)
+    return perturb_inv(mat, cen)
diff --git a/skbio/stats/distance/__init__.py b/skbio/stats/distance/__init__.py
index 6409e6b..5a9cdf3 100644
--- a/skbio/stats/distance/__init__.py
+++ b/skbio/stats/distance/__init__.py
@@ -75,9 +75,9 @@ three objects with IDs ``a``, ``b``, and ``c``::
 
 Load a distance matrix from the file:
 
->>> from StringIO import StringIO
+>>> from io import StringIO
 >>> from skbio import DistanceMatrix
->>> dm_fh = StringIO("\\ta\\tb\\tc\\n"
+>>> dm_fh = StringIO(u"\\ta\\tb\\tc\\n"
 ...                  "a\\t0.0\\t0.5\\t1.0\\n"
 ...                  "b\\t0.5\\t0.0\\t0.75\\n"
 ...                  "c\\t1.0\\t0.75\\t0.0\\n")
@@ -85,7 +85,7 @@ Load a distance matrix from the file:
 >>> print(dm)
 3x3 distance matrix
 IDs:
-'a', 'b', 'c'
+u'a', u'b', u'c'
 Data:
 [[ 0.    0.5   1.  ]
  [ 0.5   0.    0.75]
@@ -109,7 +109,7 @@ array([ 1.  ,  0.75,  0.  ])
 Serialize the distance matrix to delimited text file:
 
 >>> out_fh = StringIO()
->>> dm.write(out_fh)
+>>> _ = dm.write(out_fh)
 >>> out_fh.getvalue() == dm_fh.getvalue()
 True
 
@@ -154,7 +154,6 @@ Categorical Variable Stats
 
    anosim
    permanova
-   CategoricalStatsResults
 
 Continuous Variable Stats
 ^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -188,19 +187,20 @@ References
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from numpy.testing import Tester
+from __future__ import absolute_import, division, print_function
+
+from skbio.util import TestRunner
 
 from ._base import (DissimilarityMatrixError, DistanceMatrixError,
                     MissingIDError, DissimilarityMatrix, DistanceMatrix,
-                    CategoricalStatsResults, randdm)
+                    randdm)
 from ._bioenv import bioenv
-from ._anosim import anosim, ANOSIM
-from ._permanova import permanova, PERMANOVA
+from ._anosim import anosim
+from ._permanova import permanova
 from ._mantel import mantel, pwmantel
 
 __all__ = ['DissimilarityMatrixError', 'DistanceMatrixError', 'MissingIDError',
            'DissimilarityMatrix', 'DistanceMatrix', 'randdm', 'anosim',
-           'ANOSIM', 'permanova', 'PERMANOVA', 'CategoricalStatsResults',
-           'bioenv', 'mantel', 'pwmantel']
+           'permanova', 'bioenv', 'mantel', 'pwmantel']
 
-test = Tester().test
+test = TestRunner(__file__).test
diff --git a/skbio/stats/distance/_anosim.py b/skbio/stats/distance/_anosim.py
index 51b7b49..93d8961 100644
--- a/skbio/stats/distance/_anosim.py
+++ b/skbio/stats/distance/_anosim.py
@@ -8,16 +8,16 @@
 
 from __future__ import absolute_import, division, print_function
 
-import warnings
 from functools import partial
 
 import numpy as np
 from scipy.stats import rankdata
 
-from ._base import (_preprocess_input, _run_monte_carlo_stats, _build_results,
-                    CategoricalStats)
+from ._base import (_preprocess_input, _run_monte_carlo_stats, _build_results)
+from skbio.util._decorator import experimental
 
 
+ at experimental(as_of="0.4.0")
 def anosim(distance_matrix, grouping, column=None, permutations=999):
     """Test for significant differences between groups using ANOSIM.
 
@@ -204,72 +204,3 @@ def _compute_r_stat(tri_idxs, ranked_dists, divisor, grouping):
     r_B = np.mean(ranked_dists[np.invert(grouping_tri)])
 
     return (r_B - r_W) / divisor
-
-
-class ANOSIM(CategoricalStats):
-    """ANOSIM statistical method executor.
-
-    .. note:: Deprecated in scikit-bio 0.2.1-dev
-       ``ANOSIM`` will be removed in scikit-bio 0.3.0. It is replaced by
-       ``anosim``, which provides a simpler procedural interface to running
-       this statistical method.
-
-    Analysis of Similarities (ANOSIM) is a non-parametric method that tests
-    whether two or more groups of objects are significantly different based on
-    a categorical factor. The ranks of the distances in the distance matrix are
-    used to calculate an R statistic, which ranges between -1 (anti-grouping)
-    to +1 (strong grouping), with an R value of 0 indicating random grouping.
-
-    Notes
-    -----
-    See [1]_ for the original ANOSIM reference. The general algorithm and
-    interface are similar to ``vegan::anosim``, available in R's vegan package
-    [2]_.
-
-    References
-    ----------
-    .. [1] Clarke, KR. "Non-parametric multivariate analyses of changes in
-       community structure." Australian journal of ecology 18.1 (1993):
-       117-143.
-
-    .. [2] http://cran.r-project.org/web/packages/vegan/index.html
-
-    """
-
-    short_method_name = 'ANOSIM'
-    long_method_name = 'Analysis of Similarities'
-    test_statistic_name = 'R statistic'
-
-    def __init__(self, distance_matrix, grouping, column=None):
-        warnings.warn(
-            "skbio.stats.distance.ANOSIM is deprecated and will be removed in "
-            "scikit-bio 0.3.0. Please update your code to use "
-            "skbio.stats.distance.anosim.", DeprecationWarning)
-
-        super(ANOSIM, self).__init__(distance_matrix, grouping, column=column)
-
-        self._divisor = self._dm.shape[0] * ((self._dm.shape[0] - 1) / 4)
-        self._ranked_dists = rankdata(self._dm.condensed_form(),
-                                      method='average')
-
-    def _run(self, grouping):
-        """Compute ANOSIM R statistic (between -1 and +1)."""
-        # Create a matrix where True means that the two objects are in the same
-        # group. This ufunc requires that grouping is a numeric vector (e.g.,
-        # it won't work with a grouping vector of strings).
-        grouping_matrix = np.equal.outer(grouping, grouping)
-
-        # Extract upper triangle from the grouping matrix. It is important to
-        # extract the values in the same order that the distances are extracted
-        # from the distance matrix (see self._ranked_dists). Extracting the
-        # upper triangle (excluding the diagonal) preserves this order.
-        grouping_tri = grouping_matrix[self._tri_idxs]
-
-        return self._compute_r_stat(grouping_tri)
-
-    def _compute_r_stat(self, grouping_tri):
-        # within
-        r_W = np.mean(self._ranked_dists[grouping_tri])
-        # between
-        r_B = np.mean(self._ranked_dists[np.invert(grouping_tri)])
-        return (r_B - r_W) / self._divisor
diff --git a/skbio/stats/distance/_base.py b/skbio/stats/distance/_base.py
index 1579431..4ffc49d 100644
--- a/skbio/stats/distance/_base.py
+++ b/skbio/stats/distance/_base.py
@@ -7,10 +7,8 @@
 # ----------------------------------------------------------------------------
 
 from __future__ import absolute_import, division, print_function
-from six import StringIO, string_types
+from six import string_types
 
-import csv
-import warnings
 from copy import deepcopy
 
 import matplotlib.pyplot as plt
@@ -21,9 +19,9 @@ import pandas as pd
 from scipy.spatial.distance import squareform
 
 from skbio._base import SkbioObject
-from skbio.stats import p_value_to_str
 from skbio.stats._misc import _pprint_strs
 from skbio.util import find_duplicates
+from skbio.util._decorator import experimental
 
 
 class DissimilarityMatrixError(Exception):
@@ -39,6 +37,7 @@ class DistanceMatrixError(DissimilarityMatrixError):
 class MissingIDError(DissimilarityMatrixError):
     """Error for ID lookup that doesn't exist in the dissimilarity matrix."""
 
+    @experimental(as_of="0.4.0")
     def __init__(self, missing_id):
         super(MissingIDError, self).__init__()
         self.args = ("The ID '%s' is not in the dissimilarity matrix." %
@@ -72,17 +71,6 @@ class DissimilarityMatrix(SkbioObject):
         monotonically-increasing integers cast as strings, with numbering
         starting from zero, e.g., ``('0', '1', '2', '3', ...)``.
 
-    Attributes
-    ----------
-    data
-    ids
-    dtype
-    shape
-    size
-    T
-    png
-    svg
-
     See Also
     --------
     DistanceMatrix
@@ -103,78 +91,7 @@ class DissimilarityMatrix(SkbioObject):
     # Used in __str__
     _matrix_element_name = 'dissimilarity'
 
-    @classmethod
-    def from_file(cls, lsmat_f, delimiter='\t'):
-        """Load dissimilarity matrix from delimited text file.
-
-        .. note:: Deprecated in scikit-bio 0.2.0-dev
-           ``from_file`` will be removed in scikit-bio 0.3.0. It is replaced by
-           ``read``, which is a more general method for deserializing
-           dissimilarity/distance matrices. ``read`` supports multiple file
-           formats, automatic file format detection, etc. by taking advantage
-           of scikit-bio's I/O registry system. See :mod:`skbio.io` for more
-           details.
-
-        Creates a ``DissimilarityMatrix`` (or subclass) instance from a
-        ``lsmat`` formatted file. See :mod:`skbio.io.lsmat` for the format
-        specification.
-
-        Parameters
-        ----------
-        lsmat_f: filepath or filehandle
-            File to read from.
-        delimiter : str, optional
-            String delimiting elements in `lsmat_f`.
-
-        Returns
-        -------
-        DissimilarityMatrix
-            Instance of type `cls` containing the parsed contents of `lsmat_f`.
-
-        See Also
-        --------
-        read
-
-        """
-        warnings.warn(
-            "DissimilarityMatrix.from_file and DistanceMatrix.from_file are "
-            "deprecated and will be removed in scikit-bio 0.3.0. Please "
-            "update your code to use DissimilarityMatrix.read and "
-            "DistanceMatrix.read.", DeprecationWarning)
-        return cls.read(lsmat_f, format='lsmat', delimiter=delimiter)
-
-    def to_file(self, out_f, delimiter='\t'):
-        """Save dissimilarity matrix to file as delimited text.
-
-        .. note:: Deprecated in scikit-bio 0.2.0-dev
-           ``to_file`` will be removed in scikit-bio 0.3.0. It is replaced by
-           ``write``, which is a more general method for serializing
-           dissimilarity/distance matrices. ``write`` supports multiple file
-           formats by taking advantage of scikit-bio's I/O registry system.
-           See :mod:`skbio.io` for more details.
-
-        Serializes dissimilarity matrix as a ``lsmat`` formatted file. See
-        :mod:`skbio.io.lsmat` for the format specification.
-
-        Parameters
-        ----------
-        out_f : filepath or filehandle
-            File to write to.
-        delimiter : str, optional
-            Delimiter used to separate elements in output format.
-
-        See Also
-        --------
-        write
-
-        """
-        warnings.warn(
-            "DissimilarityMatrix.to_file and DistanceMatrix.to_file are "
-            "deprecated and will be removed in scikit-bio 0.3.0. Please "
-            "update your code to use DissimilarityMatrix.write and "
-            "DistanceMatrix.write.", DeprecationWarning)
-        self.write(out_f, format='lsmat', delimiter=delimiter)
-
+    @experimental(as_of="0.4.0")
     def __init__(self, data, ids=None):
         if isinstance(data, DissimilarityMatrix):
             data = data.data
@@ -191,6 +108,7 @@ class DissimilarityMatrix(SkbioObject):
         self._id_index = self._index_list(self._ids)
 
     @property
+    @experimental(as_of="0.4.0")
     def data(self):
         """Array of dissimilarities.
 
@@ -205,6 +123,7 @@ class DissimilarityMatrix(SkbioObject):
         return self._data
 
     @property
+    @experimental(as_of="0.4.0")
     def ids(self):
         """Tuple of object IDs.
 
@@ -226,11 +145,13 @@ class DissimilarityMatrix(SkbioObject):
         self._id_index = self._index_list(self._ids)
 
     @property
+    @experimental(as_of="0.4.0")
     def dtype(self):
         """Data type of the dissimilarities."""
         return self.data.dtype
 
     @property
+    @experimental(as_of="0.4.0")
     def shape(self):
         """Two-element tuple containing the dissimilarity matrix dimensions.
 
@@ -243,6 +164,7 @@ class DissimilarityMatrix(SkbioObject):
         return self.data.shape
 
     @property
+    @experimental(as_of="0.4.0")
     def size(self):
         """Total number of elements in the dissimilarity matrix.
 
@@ -254,6 +176,7 @@ class DissimilarityMatrix(SkbioObject):
         return self.data.size
 
     @property
+    @experimental(as_of="0.4.0")
     def T(self):
         """Transpose of the dissimilarity matrix.
 
@@ -264,6 +187,7 @@ class DissimilarityMatrix(SkbioObject):
         """
         return self.transpose()
 
+    @experimental(as_of="0.4.0")
     def transpose(self):
         """Return the transpose of the dissimilarity matrix.
 
@@ -280,6 +204,7 @@ class DissimilarityMatrix(SkbioObject):
         """
         return self.__class__(self.data.T.copy(), deepcopy(self.ids))
 
+    @experimental(as_of="0.4.0")
     def index(self, lookup_id):
         """Return the index of the specified ID.
 
@@ -304,6 +229,7 @@ class DissimilarityMatrix(SkbioObject):
         else:
             raise MissingIDError(lookup_id)
 
+    @experimental(as_of="0.4.0")
     def redundant_form(self):
         """Return an array of dissimilarities in redundant format.
 
@@ -329,6 +255,7 @@ class DissimilarityMatrix(SkbioObject):
         """
         return self.data
 
+    @experimental(as_of="0.4.0")
     def copy(self):
         """Return a deep copy of the dissimilarity matrix.
 
@@ -343,6 +270,7 @@ class DissimilarityMatrix(SkbioObject):
         # point in the future.
         return self.__class__(self.data.copy(), deepcopy(self.ids))
 
+    @experimental(as_of="0.4.0")
     def filter(self, ids, strict=True):
         """Filter the dissimilarity matrix by IDs.
 
@@ -385,6 +313,7 @@ class DissimilarityMatrix(SkbioObject):
         filtered_data = self._data[idxs][:, idxs]
         return self.__class__(filtered_data, ids)
 
+    @experimental(as_of="0.4.0")
     def plot(self, cmap=None, title=""):
         """Creates a heatmap of the dissimilarity matrix
 
@@ -451,6 +380,7 @@ class DissimilarityMatrix(SkbioObject):
         return self._figure_data('svg')
 
     @property
+    @experimental(as_of="0.4.0")
     def png(self):
         """Display heatmap in IPython Notebook as PNG.
 
@@ -458,6 +388,7 @@ class DissimilarityMatrix(SkbioObject):
         return Image(self._repr_png_(), embed=True)
 
     @property
+    @experimental(as_of="0.4.0")
     def svg(self):
         """Display heatmap in IPython Notebook as SVG.
 
@@ -472,6 +403,7 @@ class DissimilarityMatrix(SkbioObject):
         plt.close(fig)
         return data
 
+    @experimental(as_of="0.4.0")
     def __str__(self):
         """Return a string representation of the dissimilarity matrix.
 
@@ -483,13 +415,12 @@ class DissimilarityMatrix(SkbioObject):
         str
             String representation of the dissimilarity matrix.
 
-        .. shownumpydoc
-
         """
         return '%dx%d %s matrix\nIDs:\n%s\nData:\n' % (
             self.shape[0], self.shape[1], self._matrix_element_name,
             _pprint_strs(self.ids)) + str(self.data)
 
+    @experimental(as_of="0.4.0")
     def __eq__(self, other):
         """Compare this dissimilarity matrix to another for equality.
 
@@ -509,8 +440,6 @@ class DissimilarityMatrix(SkbioObject):
         bool
             ``True`` if `self` is equal to `other`, ``False`` otherwise.
 
-        .. shownumpydoc
-
         """
         equal = True
 
@@ -532,6 +461,7 @@ class DissimilarityMatrix(SkbioObject):
 
         return equal
 
+    @experimental(as_of="0.4.0")
     def __ne__(self, other):
         """Determine whether two dissimilarity matrices are not equal.
 
@@ -549,11 +479,10 @@ class DissimilarityMatrix(SkbioObject):
         --------
         __eq__
 
-        .. shownumpydoc
-
         """
         return not self == other
 
+    @experimental(as_of="0.4.0")
     def __contains__(self, lookup_id):
         """Check if the specified ID is in the dissimilarity matrix.
 
@@ -572,11 +501,10 @@ class DissimilarityMatrix(SkbioObject):
         --------
         index
 
-        .. shownumpydoc
-
         """
         return lookup_id in self._id_index
 
+    @experimental(as_of="0.4.0")
     def __getitem__(self, index):
         """Slice into dissimilarity data by object ID or numpy indexing.
 
@@ -623,8 +551,6 @@ class DissimilarityMatrix(SkbioObject):
         -----
         The lookup based on ID(s) is quick.
 
-        .. shownumpydoc
-
         """
         if isinstance(index, string_types):
             return self.data[self.index(index)]
@@ -722,6 +648,7 @@ class DistanceMatrix(DissimilarityMatrix):
     # Override here, used in superclass __str__
     _matrix_element_name = 'distance'
 
+    @experimental(as_of="0.4.0")
     def condensed_form(self):
         """Return an array of distances in condensed format.
 
@@ -744,6 +671,7 @@ class DistanceMatrix(DissimilarityMatrix):
         """
         return squareform(self._data, force='tovector', checks=False)
 
+    @experimental(as_of="0.4.0")
     def permute(self, condensed=False):
         """Randomly permute both rows and columns in the matrix.
 
@@ -797,6 +725,7 @@ class DistanceMatrix(DissimilarityMatrix):
             raise DistanceMatrixError("Data must be symmetric.")
 
 
+ at experimental(as_of="0.4.0")
 def randdm(num_objects, ids=None, constructor=None, random_fn=None):
     """Generate a distance matrix populated with random distances.
 
@@ -844,7 +773,7 @@ def randdm(num_objects, ids=None, constructor=None, random_fn=None):
         random_fn = np.random.rand
 
     data = np.tril(random_fn(num_objects, num_objects), -1)
-    data += data.T
+    data = data + data.T
 
     if not ids:
         ids = map(str, range(1, num_objects + 1))
@@ -976,257 +905,3 @@ def _build_results(method_name, test_stat_name, sample_size, num_groups, stat,
                'number of groups', 'test statistic', 'p-value',
                'number of permutations'],
         name='%s results' % method_name)
-
-
-class CategoricalStats(object):
-    """Base class for categorical statistical methods.
-
-    Categorical statistical methods generally test for significant differences
-    between discrete groups of objects, as determined by a categorical variable
-    (grouping vector).
-
-    See Also
-    --------
-    ANOSIM
-    PERMANOVA
-
-    """
-
-    short_method_name = ''
-    long_method_name = ''
-    test_statistic_name = ''
-
-    def __init__(self, distance_matrix, grouping, column=None):
-        if not isinstance(distance_matrix, DistanceMatrix):
-            raise TypeError("Input must be a DistanceMatrix.")
-
-        if isinstance(grouping, pd.DataFrame):
-            if column is None:
-                raise ValueError("Must provide a column name if supplying a "
-                                 "data frame.")
-            else:
-                grouping = self._df_to_vector(distance_matrix, grouping,
-                                              column)
-        elif column is not None:
-            raise ValueError("Must provide a data frame if supplying a column "
-                             "name.")
-
-        if len(grouping) != distance_matrix.shape[0]:
-            raise ValueError("Grouping vector size must match the number of "
-                             "IDs in the distance matrix.")
-
-        # Find the group labels and convert grouping to an integer vector
-        # (factor).
-        groups, grouping = np.unique(grouping, return_inverse=True)
-
-        if len(groups) == len(grouping):
-            raise ValueError("All values in the grouping vector are unique. "
-                             "This method cannot operate on a grouping vector "
-                             "with only unique values (e.g., there are no "
-                             "'within' distances because each group of "
-                             "objects contains only a single object).")
-        if len(groups) == 1:
-            raise ValueError("All values in the grouping vector are the same. "
-                             "This method cannot operate on a grouping vector "
-                             "with only a single group of objects (e.g., "
-                             "there are no 'between' distances because there "
-                             "is only a single group).")
-
-        self._dm = distance_matrix
-        self._grouping = grouping
-        self._groups = groups
-        self._tri_idxs = np.triu_indices(self._dm.shape[0], k=1)
-
-    def _df_to_vector(self, distance_matrix, df, column):
-        """Return a grouping vector from a data frame column.
-
-        Parameters
-        ----------
-        distance_marix : DistanceMatrix
-            Distance matrix whose IDs will be mapped to group labels.
-        df : pandas.DataFrame
-            ``DataFrame`` (indexed by distance matrix ID).
-        column : str
-            Column name in `df` containing group labels.
-
-        Returns
-        -------
-        list
-            Grouping vector (vector of labels) based on the IDs in
-            `distance_matrix`. Each ID's label is looked up in the data frame
-            under the column specified by `column`.
-
-        Raises
-        ------
-        ValueError
-            If `column` is not in the data frame, or a distance matrix ID is
-            not in the data frame.
-
-        """
-        if column not in df:
-            raise ValueError("Column '%s' not in data frame." % column)
-
-        grouping = df.loc[distance_matrix.ids, column]
-        if grouping.isnull().any():
-            raise ValueError("One or more IDs in the distance matrix are not "
-                             "in the data frame.")
-        return grouping.tolist()
-
-    def __call__(self, permutations=999):
-        """Execute the statistical method.
-
-        Parameters
-        ----------
-        permutations : int, optional
-            Number of permutations to use when calculating statistical
-            significance. Must be >= 0. If 0, the resulting p-value will be
-            ``None``.
-
-        Returns
-        -------
-        CategoricalStatsResults
-            Results of the method, including test statistic and p-value.
-
-        .. shownumpydoc
-
-        """
-        if permutations < 0:
-            raise ValueError("Number of permutations must be greater than or "
-                             "equal to zero.")
-
-        stat = self._run(self._grouping)
-
-        p_value = None
-        if permutations > 0:
-            perm_stats = np.empty(permutations, dtype=np.float64)
-
-            for i in range(permutations):
-                perm_grouping = np.random.permutation(self._grouping)
-                perm_stats[i] = self._run(perm_grouping)
-
-            p_value = ((perm_stats >= stat).sum() + 1) / (permutations + 1)
-
-        return CategoricalStatsResults(self.short_method_name,
-                                       self.long_method_name,
-                                       self.test_statistic_name,
-                                       self._dm.shape[0], self._groups, stat,
-                                       p_value, permutations)
-
-    def _run(self, grouping):
-        raise NotImplementedError("Subclasses must implement _run().")
-
-
-class CategoricalStatsResults(object):
-    """Statistical method results container.
-
-    .. note:: Deprecated in scikit-bio 0.2.1-dev
-       ``CategoricalStatsResults`` will be removed in scikit-bio 0.3.0. It is
-       replaced by ``pandas.Series`` for storing statistical method results.
-       Please update your code to use ``skbio.stats.distance.anosim`` or
-       ``skbio.stats.distance.permanova``, which will return a
-       ``pandas.Series``.
-
-    Stores the results of running a `CategoricalStats` method a single time,
-    and provides a way to format the results.
-
-    Attributes
-    ----------
-    short_method_name
-    long_method_name
-    test_statistic_name
-    sample_size
-    groups
-    statistic
-    p_value
-    permutations
-
-    Notes
-    -----
-    Users will generally not directly instantiate objects of this class. The
-    various categorical statistical methods will return an object of this type
-    when they are run.
-
-    """
-
-    def __init__(self, short_method_name, long_method_name,
-                 test_statistic_name, sample_size, groups, statistic, p_value,
-                 permutations):
-        warnings.warn(
-            "skbio.stats.distance.CategoricalStatsResults is deprecated and "
-            "will be removed in scikit-bio 0.3.0. Please update your code to "
-            "use either skbio.stats.distance.anosim or "
-            "skbio.stats.distance.permanova, which will return a "
-            "pandas.Series object.", DeprecationWarning)
-
-        self.short_method_name = short_method_name
-        self.long_method_name = long_method_name
-        self.test_statistic_name = test_statistic_name
-        self.sample_size = sample_size
-        self.groups = groups
-        self.statistic = statistic
-        self.p_value = p_value
-        self.permutations = permutations
-
-    def __str__(self):
-        """Return pretty-print (fixed width) string."""
-        rows = (self._format_header(), self._format_data())
-
-        max_widths = []
-        for col_idx in range(len(rows[0])):
-            max_widths.append(max(map(lambda e: len(e[col_idx]), rows)))
-
-        results = []
-        for row in rows:
-            padded_row = []
-            for col_idx, val in enumerate(row):
-                padded_row.append(val.rjust(max_widths[col_idx]))
-            results.append('  '.join(padded_row))
-
-        return '\n'.join(results) + '\n'
-
-    def _repr_html_(self):
-        """Return a string containing an HTML table of results.
-
-        This method will be called within the IPython Notebook instead of
-        __repr__ to display results.
-
-        """
-        header = self._format_header()
-        data = self._format_data()
-        return pd.DataFrame([data[1:]], columns=header[1:],
-                            index=[data[0]])._repr_html_()
-
-    def summary(self, delimiter='\t'):
-        """Return a formatted summary of results as a string.
-
-        The string is formatted as delimited text.
-
-        Parameters
-        ----------
-        delimiter : str, optional
-            String to delimit fields by in formatted output. Default is tab
-            (TSV).
-
-        Returns
-        -------
-        str
-            Delimited-text summary of results.
-
-        """
-        summary = StringIO()
-        csv_writer = csv.writer(summary, delimiter=delimiter,
-                                lineterminator='\n')
-        csv_writer.writerow(self._format_header())
-        csv_writer.writerow(self._format_data())
-        return summary.getvalue()
-
-    def _format_header(self):
-        return ('Method name', 'Sample size', 'Number of groups',
-                self.test_statistic_name, 'p-value', 'Number of permutations')
-
-    def _format_data(self):
-        p_value_str = p_value_to_str(self.p_value, self.permutations)
-
-        return (self.short_method_name, '%d' % self.sample_size,
-                '%d' % len(self.groups), str(self.statistic), p_value_str,
-                '%d' % self.permutations)
diff --git a/skbio/stats/distance/_bioenv.py b/skbio/stats/distance/_bioenv.py
index 3ce7ae6..b4288f7 100644
--- a/skbio/stats/distance/_bioenv.py
+++ b/skbio/stats/distance/_bioenv.py
@@ -16,8 +16,10 @@ from scipy.spatial.distance import pdist
 from scipy.stats import spearmanr
 
 from skbio.stats.distance import DistanceMatrix
+from skbio.util._decorator import experimental
 
 
+ at experimental(as_of="0.4.0")
 def bioenv(distance_matrix, data_frame, columns=None):
     """Find subset of variables maximally correlated with distances.
 
@@ -104,19 +106,11 @@ def bioenv(distance_matrix, data_frame, columns=None):
 
     Examples
     --------
-    Import the functionality we'll use in the following examples. The call to
-    ``pd.set_option`` ensures consistent data frame formatting across
-    different versions of pandas. This call is not necessary for normal
-    use; it is only included here so that the doctests will pass.
+    Import the functionality we'll use in the following examples:
 
     >>> import pandas as pd
     >>> from skbio import DistanceMatrix
     >>> from skbio.stats.distance import bioenv
-    >>> try:
-    ...     # not necessary for normal use
-    ...     pd.set_option('show_dimensions', True)
-    ... except KeyError:
-    ...     pass
 
     Load a 4x4 community distance matrix:
 
@@ -150,8 +144,6 @@ def bioenv(distance_matrix, data_frame, columns=None):
     vars
     pH                1     0.771517
     pH, Elevation     2     0.714286
-    <BLANKLINE>
-    [2 rows x 2 columns]
 
     We see that in this simple example, pH alone is maximally rank-correlated
     with the community distances (:math:`\\rho=0.771517`).
diff --git a/skbio/stats/distance/_mantel.py b/skbio/stats/distance/_mantel.py
index e7376a4..fa4a73e 100644
--- a/skbio/stats/distance/_mantel.py
+++ b/skbio/stats/distance/_mantel.py
@@ -18,8 +18,10 @@ import scipy.misc
 from scipy.stats import pearsonr, spearmanr
 
 from skbio.stats.distance import DistanceMatrix
+from skbio.util._decorator import experimental
 
 
+ at experimental(as_of="0.4.0")
 def mantel(x, y, method='pearson', permutations=999, alternative='two-sided',
            strict=True, lookup=None):
     """Compute correlation between distance matrices using the Mantel test.
@@ -297,6 +299,7 @@ def mantel(x, y, method='pearson', permutations=999, alternative='two-sided',
     return orig_stat, p_value, n
 
 
+ at experimental(as_of="0.4.0")
 def pwmantel(dms, labels=None, method='pearson', permutations=999,
              alternative='two-sided', strict=True, lookup=None):
     """Run Mantel tests for every pair of given distance matrices.
@@ -353,19 +356,10 @@ def pwmantel(dms, labels=None, method='pearson', permutations=999,
 
     Examples
     --------
-    Import the functionality we'll use in the following examples. The call to
-    ``pd.set_option`` ensures consistent ``DataFrame`` formatting across
-    different versions of pandas. This call is not necessary for normal
-    use; it is only included here so that the doctests will pass.
+    Import the functionality we'll use in the following examples:
 
-    >>> import pandas as pd
     >>> from skbio import DistanceMatrix
     >>> from skbio.stats.distance import pwmantel
-    >>> try:
-    ...     # not necessary for normal use
-    ...     pd.set_option('show_dimensions', True)
-    ... except KeyError:
-    ...     pass
 
     Define three 3x3 distance matrices:
 
@@ -389,8 +383,6 @@ def pwmantel(dms, labels=None, method='pearson', permutations=999,
     x   y     0.755929     NaN  3  pearson             0   two-sided
         z    -0.755929     NaN  3  pearson             0   two-sided
     y   z    -0.142857     NaN  3  pearson             0   two-sided
-    <BLANKLINE>
-    [3 rows x 6 columns]
 
     Note that we passed ``permutations=0`` to suppress significance tests; the
     p-values in the output are labelled ``NaN``.
diff --git a/skbio/stats/distance/_permanova.py b/skbio/stats/distance/_permanova.py
index 1ec31f6..95d27ad 100644
--- a/skbio/stats/distance/_permanova.py
+++ b/skbio/stats/distance/_permanova.py
@@ -9,15 +9,15 @@
 from __future__ import absolute_import, division, print_function
 from future.builtins import range
 
-import warnings
 from functools import partial
 
 import numpy as np
 
-from ._base import (_preprocess_input, _run_monte_carlo_stats, _build_results,
-                    CategoricalStats)
+from ._base import (_preprocess_input, _run_monte_carlo_stats, _build_results)
+from skbio.util._decorator import experimental
 
 
+ at experimental(as_of="0.4.0")
 def permanova(distance_matrix, grouping, column=None, permutations=999):
     """Test for significant differences between groups using PERMANOVA.
 
@@ -136,86 +136,3 @@ def _compute_f_stat(sample_size, num_groups, tri_idxs, distances, group_sizes,
 def _index_combinations(indices):
     # Modified from http://stackoverflow.com/a/11144716
     return np.tile(indices, len(indices)), np.repeat(indices, len(indices))
-
-
-class PERMANOVA(CategoricalStats):
-    """PERMANOVA statistical method executor.
-
-    .. note:: Deprecated in scikit-bio 0.2.1-dev
-       ``PERMANOVA`` will be removed in scikit-bio 0.3.0. It is replaced by
-       ``permanova``, which provides a simpler procedural interface to running
-       this statistical method.
-
-    Permutational Multivariate Analysis of Variance (PERMANOVA) is a
-    non-parametric method that tests whether two or more groups of objects are
-    significantly different based on a categorical factor. It is conceptually
-    similar to ANOVA except that it operates on a distance matrix, which allows
-    for multivariate analysis. PERMANOVA computes a pseudo-F statistic and
-    tests the significance through permutations.
-
-    Notes
-    -----
-    See [1]_ for the original PERMANOVA reference, as well as
-    ``vegan::adonis``, available in R's vegan package [2]_.
-
-    References
-    ----------
-    .. [1] Anderson, Marti J. "A new method for non-parametric multivariate
-       analysis of variance." Austral Ecology 26.1 (2001): 32-46.
-
-    .. [2] http://cran.r-project.org/web/packages/vegan/index.html
-
-    """
-
-    short_method_name = 'PERMANOVA'
-    long_method_name = 'Permutational Multivariate Analysis of Variance'
-    test_statistic_name = 'pseudo-F statistic'
-
-    def __init__(self, distance_matrix, grouping, column=None):
-        warnings.warn(
-            "skbio.stats.distance.PERMANOVA is deprecated and will be removed "
-            "in scikit-bio 0.3.0. Please update your code to use "
-            "skbio.stats.distance.permanova.", DeprecationWarning)
-
-        super(PERMANOVA, self).__init__(distance_matrix, grouping,
-                                        column=column)
-
-        # Calculate number of objects in each group.
-        self._group_sizes = np.bincount(self._grouping)
-        self._num_groups = len(self._groups)
-        self._distances = self._dm.condensed_form()
-        self._s_T = (self._distances ** 2).sum() / self._dm.shape[0]
-
-    def _run(self, grouping):
-        """Compute PERMANOVA pseudo-F statistic."""
-        # Create a matrix where objects in the same group are marked with the
-        # group index (e.g. 0, 1, 2, etc.). objects that are not in the same
-        # group are marked with -1.
-        grouping_matrix = -1 * np.ones(self._dm.shape, dtype=int)
-        for group_idx in range(len(self._groups)):
-            within_indices = self._index_combinations(
-                np.where(grouping == group_idx)[0])
-            grouping_matrix[within_indices] = group_idx
-
-        # Extract upper triangle (in same order as distances were extracted
-        # from full distance matrix).
-        grouping_tri = grouping_matrix[self._tri_idxs]
-
-        return self._compute_f_stat(grouping_tri)
-
-    def _index_combinations(self, indices):
-        # Modified from http://stackoverflow.com/a/11144716
-        return np.tile(indices, len(indices)), np.repeat(indices, len(indices))
-
-    def _compute_f_stat(self, grouping_tri):
-        a = self._num_groups
-        N = self._dm.shape[0]
-
-        # Calculate s_W for each group, accounting for different group sizes.
-        s_W = 0
-        for i in range(a):
-            s_W += ((self._distances[grouping_tri == i] ** 2).sum() /
-                    self._group_sizes[i])
-
-        s_A = self._s_T - s_W
-        return (s_A / (a - 1)) / (s_W / (N - a))
diff --git a/skbio/stats/distance/tests/__init__.py b/skbio/stats/distance/tests/__init__.py
index c99682c..3fe3dc6 100644
--- a/skbio/stats/distance/tests/__init__.py
+++ b/skbio/stats/distance/tests/__init__.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -7,3 +5,5 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
diff --git a/skbio/stats/distance/tests/test_anosim.py b/skbio/stats/distance/tests/test_anosim.py
index f839040..2f671bb 100644
--- a/skbio/stats/distance/tests/test_anosim.py
+++ b/skbio/stats/distance/tests/test_anosim.py
@@ -13,12 +13,11 @@ from functools import partial
 from unittest import TestCase, main
 
 import numpy as np
-import numpy.testing as npt
 import pandas as pd
 from pandas.util.testing import assert_series_equal
 
 from skbio import DistanceMatrix
-from skbio.stats.distance import anosim, ANOSIM
+from skbio.stats.distance import anosim
 
 
 class TestANOSIM(TestCase):
@@ -76,7 +75,8 @@ class TestANOSIM(TestCase):
         # inputs. Also ensure we get the same results if we run the method
         # using a grouping vector or a data frame with equivalent groupings.
         exp = pd.Series(index=self.exp_index,
-                        data=['ANOSIM', 'R', 4, 2, 0.25, 0.671, 999])
+                        data=['ANOSIM', 'R', 4, 2, 0.25, 0.671, 999],
+                        name='ANOSIM results')
 
         for _ in range(2):
             np.random.seed(0)
@@ -90,20 +90,23 @@ class TestANOSIM(TestCase):
 
     def test_no_ties(self):
         exp = pd.Series(index=self.exp_index,
-                        data=['ANOSIM', 'R', 4, 2, 0.625, 0.332, 999])
+                        data=['ANOSIM', 'R', 4, 2, 0.625, 0.332, 999],
+                        name='ANOSIM results')
         np.random.seed(0)
         obs = anosim(self.dm_no_ties, self.grouping_equal)
         self.assert_series_equal(obs, exp)
 
     def test_no_permutations(self):
         exp = pd.Series(index=self.exp_index,
-                        data=['ANOSIM', 'R', 4, 2, 0.625, np.nan, 0])
+                        data=['ANOSIM', 'R', 4, 2, 0.625, np.nan, 0],
+                        name='ANOSIM results')
         obs = anosim(self.dm_no_ties, self.grouping_equal, permutations=0)
         self.assert_series_equal(obs, exp)
 
     def test_unequal_group_sizes(self):
         exp = pd.Series(index=self.exp_index,
-                        data=['ANOSIM', 'R', 6, 3, -0.363636, 0.878, 999])
+                        data=['ANOSIM', 'R', 6, 3, -0.363636, 0.878, 999],
+                        name='ANOSIM results')
 
         np.random.seed(0)
         obs = anosim(self.dm_unequal, self.grouping_unequal)
@@ -114,89 +117,5 @@ class TestANOSIM(TestCase):
         self.assert_series_equal(obs, exp)
 
 
-class TestANOSIMClass(TestCase):
-    """All results were verified with R (vegan::anosim)."""
-
-    def setUp(self):
-        # Distance matrices with and without ties in the ranks, with 2 groups
-        # of equal size.
-        dm_ids = ['s1', 's2', 's3', 's4']
-        grouping_equal = ['Control', 'Control', 'Fast', 'Fast']
-        df = pd.read_csv(
-            StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n'
-                     's1,Control'), index_col=0)
-
-        self.dm_ties = DistanceMatrix([[0, 1, 1, 4],
-                                       [1, 0, 3, 2],
-                                       [1, 3, 0, 3],
-                                       [4, 2, 3, 0]], dm_ids)
-
-        self.dm_no_ties = DistanceMatrix([[0, 1, 5, 4],
-                                          [1, 0, 3, 2],
-                                          [5, 3, 0, 3],
-                                          [4, 2, 3, 0]], dm_ids)
-
-        # Test with 3 groups of unequal size. This data also generates a
-        # negative R statistic.
-        grouping_unequal = ['Control', 'Treatment1', 'Treatment2',
-                            'Treatment1', 'Control', 'Control']
-
-        self.dm_unequal = DistanceMatrix(
-            [[0.0, 1.0, 0.1, 0.5678, 1.0, 1.0],
-             [1.0, 0.0, 0.002, 0.42, 0.998, 0.0],
-             [0.1, 0.002, 0.0, 1.0, 0.123, 1.0],
-             [0.5678, 0.42, 1.0, 0.0, 0.123, 0.43],
-             [1.0, 0.998, 0.123, 0.123, 0.0, 0.5],
-             [1.0, 0.0, 1.0, 0.43, 0.5, 0.0]],
-            ['s1', 's2', 's3', 's4', 's5', 's6'])
-
-        self.anosim_ties = ANOSIM(self.dm_ties, grouping_equal)
-        self.anosim_no_ties = ANOSIM(self.dm_no_ties, grouping_equal)
-        self.anosim_ties_df = ANOSIM(self.dm_ties, df, column='Group')
-        self.anosim_unequal = ANOSIM(self.dm_unequal, grouping_unequal)
-
-    def test_call_ties(self):
-        # Ensure we get the same results if we rerun the method on the same
-        # object. Also ensure we get the same results if we run the method
-        # using a grouping vector or a data frame with equivalent groupings.
-        for inst in self.anosim_ties, self.anosim_ties_df:
-            for trial in range(2):
-                np.random.seed(0)
-                obs = inst()
-                self.assertEqual(obs.sample_size, 4)
-                npt.assert_array_equal(obs.groups,
-                                       ['Control', 'Fast'])
-                self.assertAlmostEqual(obs.statistic, 0.25)
-                self.assertAlmostEqual(obs.p_value, 0.671)
-                self.assertEqual(obs.permutations, 999)
-
-    def test_call_no_ties(self):
-        np.random.seed(0)
-        obs = self.anosim_no_ties()
-        self.assertEqual(obs.sample_size, 4)
-        npt.assert_array_equal(obs.groups, ['Control', 'Fast'])
-        self.assertAlmostEqual(obs.statistic, 0.625)
-        self.assertAlmostEqual(obs.p_value, 0.332)
-        self.assertEqual(obs.permutations, 999)
-
-    def test_call_no_permutations(self):
-        obs = self.anosim_no_ties(0)
-        self.assertEqual(obs.sample_size, 4)
-        npt.assert_array_equal(obs.groups, ['Control', 'Fast'])
-        self.assertAlmostEqual(obs.statistic, 0.625)
-        self.assertEqual(obs.p_value, None)
-        self.assertEqual(obs.permutations, 0)
-
-    def test_call_unequal_group_sizes(self):
-        np.random.seed(0)
-        obs = self.anosim_unequal()
-        self.assertEqual(obs.sample_size, 6)
-        npt.assert_array_equal(obs.groups,
-                               ['Control', 'Treatment1', 'Treatment2'])
-        self.assertAlmostEqual(obs.statistic, -0.363636, 6)
-        self.assertAlmostEqual(obs.p_value, 0.878)
-        self.assertEqual(obs.permutations, 999)
-
-
 if __name__ == '__main__':
     main()
diff --git a/skbio/stats/distance/tests/test_base.py b/skbio/stats/distance/tests/test_base.py
index 10cca1f..7c380ec 100644
--- a/skbio/stats/distance/tests/test_base.py
+++ b/skbio/stats/distance/tests/test_base.py
@@ -21,9 +21,9 @@ from IPython.core.display import Image, SVG
 from skbio import DistanceMatrix
 from skbio.stats.distance import (
     DissimilarityMatrixError, DistanceMatrixError, MissingIDError,
-    DissimilarityMatrix, randdm, CategoricalStatsResults)
-from skbio.stats.distance._base import (
-    _preprocess_input, _run_monte_carlo_stats, CategoricalStats)
+    DissimilarityMatrix, randdm)
+from skbio.stats.distance._base import (_preprocess_input,
+                                        _run_monte_carlo_stats)
 
 
 class DissimilarityMatrixTestData(TestCase):
@@ -56,15 +56,6 @@ class DissimilarityMatrixTests(DissimilarityMatrixTestData):
                                    np.array(self.dm_2x2_asym_data),
                                    np.array(self.dm_3x3_data)]
 
-    def test_deprecated_io(self):
-        fh = StringIO()
-        npt.assert_warns(DeprecationWarning, self.dm_3x3.to_file, fh)
-        fh.seek(0)
-        deserialized = npt.assert_warns(DeprecationWarning,
-                                        DissimilarityMatrix.from_file, fh)
-        self.assertEqual(deserialized, self.dm_3x3)
-        self.assertTrue(type(deserialized) == DissimilarityMatrix)
-
     def test_init_from_dm(self):
         ids = ['foo', 'bar', 'baz']
 
@@ -459,15 +450,6 @@ class DistanceMatrixTests(DissimilarityMatrixTestData):
         self.dm_condensed_forms = [np.array([]), np.array([0.123]),
                                    np.array([0.01, 4.2, 12.0])]
 
-    def test_deprecated_io(self):
-        fh = StringIO()
-        npt.assert_warns(DeprecationWarning, self.dm_3x3.to_file, fh)
-        fh.seek(0)
-        deserialized = npt.assert_warns(DeprecationWarning,
-                                        DistanceMatrix.from_file, fh)
-        self.assertEqual(deserialized, self.dm_3x3)
-        self.assertTrue(type(deserialized) == DistanceMatrix)
-
     def test_init_invalid_input(self):
         # Asymmetric.
         data = [[0.0, 2.0], [1.0, 0.0]]
@@ -560,6 +542,10 @@ class RandomDistanceMatrixTests(TestCase):
 
         self.assertTrue(found_diff)
 
+    def test_large_matrix_for_symmetry(self):
+        obs3 = randdm(100)
+        self.assertEqual(obs3, obs3.T)
+
     def test_ids(self):
         ids = ['foo', 'bar', 'baz']
         obs = randdm(3, ids=ids)
@@ -671,96 +657,5 @@ class CategoricalStatsHelperFunctionTests(TestCase):
             _run_monte_carlo_stats(lambda e: 42, self.grouping, -1)
 
 
-class CategoricalStatsTests(TestCase):
-    def setUp(self):
-        self.dm = DistanceMatrix([[0.0, 1.0, 2.0], [1.0, 0.0, 3.0],
-                                  [2.0, 3.0, 0.0]], ['a', 'b', 'c'])
-        self.grouping = [1, 2, 1]
-        # Ordering of IDs shouldn't matter, nor should extra IDs.
-        self.df = pd.read_csv(
-            StringIO('ID,Group\nb,Group1\na,Group2\nc,Group1\nd,Group3'),
-            index_col=0)
-        self.df_missing_id = pd.read_csv(
-            StringIO('ID,Group\nb,Group1\nc,Group1'), index_col=0)
-        self.categorical_stats = CategoricalStats(self.dm, self.grouping)
-        self.categorical_stats_from_df = CategoricalStats(self.dm, self.df,
-                                                          column='Group')
-
-    def test_init_invalid_input(self):
-        # Requires a DistanceMatrix.
-        with self.assertRaises(TypeError):
-            CategoricalStats(DissimilarityMatrix([[0, 2], [3, 0]], ['a', 'b']),
-                             [1, 2])
-
-        # Requires column if DataFrame.
-        with self.assertRaises(ValueError):
-            CategoricalStats(self.dm, self.df)
-
-        # Cannot provide column if not data frame.
-        with self.assertRaises(ValueError):
-            CategoricalStats(self.dm, self.grouping, column='Group')
-
-        # Column must exist in data frame.
-        with self.assertRaises(ValueError):
-            CategoricalStats(self.dm, self.df, column='foo')
-
-        # All distance matrix IDs must be in data frame.
-        with self.assertRaises(ValueError):
-            CategoricalStats(self.dm, self.df_missing_id, column='Group')
-
-        # Grouping vector length must match number of objects in dm.
-        with self.assertRaises(ValueError):
-            CategoricalStats(self.dm, [1, 2])
-
-        # Grouping vector cannot have only unique values.
-        with self.assertRaises(ValueError):
-            CategoricalStats(self.dm, [1, 2, 3])
-
-        # Grouping vector cannot have only a single group.
-        with self.assertRaises(ValueError):
-            CategoricalStats(self.dm, [1, 1, 1])
-
-    def test_call(self):
-        with self.assertRaises(NotImplementedError):
-            self.categorical_stats()
-
-    def test_call_invalid_permutations(self):
-        with self.assertRaises(ValueError):
-            self.categorical_stats(-1)
-
-
-class CategoricalStatsResultsTests(TestCase):
-    def setUp(self):
-        self.results = CategoricalStatsResults('foo', 'Foo', 'my stat', 42,
-                                               ['a', 'b', 'c', 'd'],
-                                               0.01234567890, 0.1151111, 99)
-
-    def test_str(self):
-        exp = ('Method name  Sample size  Number of groups       my stat  '
-               'p-value  Number of permutations\n        foo           42'
-               '                 4  0.0123456789     0.12'
-               '                      99\n')
-        obs = str(self.results)
-        self.assertEqual(obs, exp)
-
-    def test_repr_html(self):
-        # Not going to test against exact HTML that we expect, as this could
-        # easily break and be annoying to constantly update. Do some light
-        # sanity-checking to ensure there are some of the expected HTML tags.
-        obs = self.results._repr_html_()
-        self.assertTrue('<table' in obs)
-        self.assertTrue('<thead' in obs)
-        self.assertTrue('<tr' in obs)
-        self.assertTrue('<th' in obs)
-        self.assertTrue('<tbody' in obs)
-        self.assertTrue('<td' in obs)
-
-    def test_summary(self):
-        exp = ('Method name\tSample size\tNumber of groups\tmy stat\tp-value\t'
-               'Number of permutations\nfoo\t42\t4\t0.0123456789\t0.12\t99\n')
-        obs = self.results.summary()
-        self.assertEqual(obs, exp)
-
-
 if __name__ == '__main__':
     main()
diff --git a/skbio/stats/distance/tests/test_bioenv.py b/skbio/stats/distance/tests/test_bioenv.py
index e83fdc5..54a2c33 100644
--- a/skbio/stats/distance/tests/test_bioenv.py
+++ b/skbio/stats/distance/tests/test_bioenv.py
@@ -11,12 +11,11 @@ from unittest import TestCase, main
 
 import numpy as np
 import pandas as pd
-from pandas.util.testing import assert_frame_equal
 
 from skbio import DistanceMatrix
 from skbio.stats.distance import bioenv
 from skbio.stats.distance._bioenv import _scale
-from skbio.util import get_data_path
+from skbio.util import get_data_path, assert_data_frame_almost_equal
 
 
 class BIOENVTests(TestCase):
@@ -91,34 +90,36 @@ class BIOENVTests(TestCase):
     def test_bioenv_all_columns_implicit(self):
         # Test with all columns in data frame (implicitly).
         obs = bioenv(self.dm, self.df)
-        assert_frame_equal(obs, self.exp_results)
+        assert_data_frame_almost_equal(obs, self.exp_results)
 
         # Should get the same results if order of rows/cols in distance matrix
         # is changed.
         obs = bioenv(self.dm_reordered, self.df)
-        assert_frame_equal(obs, self.exp_results)
+        assert_data_frame_almost_equal(obs, self.exp_results)
 
     def test_bioenv_all_columns_explicit(self):
         # Test with all columns being specified.
         obs = bioenv(self.dm, self.df, columns=self.cols)
-        assert_frame_equal(obs, self.exp_results)
+        assert_data_frame_almost_equal(obs, self.exp_results)
 
         # Test against a data frame that has an extra non-numeric column and
         # some of the rows and columns reordered (we should get the same
         # result since we're specifying the same columns in the same order).
         obs = bioenv(self.dm, self.df_extra_column, columns=self.cols)
-        assert_frame_equal(obs, self.exp_results)
+        assert_data_frame_almost_equal(obs, self.exp_results)
 
     def test_bioenv_single_column(self):
         obs = bioenv(self.dm, self.df, columns=['PH'])
-        assert_frame_equal(obs, self.exp_results_single_column)
+        assert_data_frame_almost_equal(obs, self.exp_results_single_column)
 
     def test_bioenv_different_column_order(self):
         # Specifying columns in a different order will change the row labels in
         # the results data frame as the column subsets will be reordered, but
         # the actual results (e.g., correlation coefficients) shouldn't change.
         obs = bioenv(self.dm, self.df, columns=self.cols[::-1])
-        assert_frame_equal(obs, self.exp_results_different_column_order)
+        assert_data_frame_almost_equal(
+            obs,
+            self.exp_results_different_column_order)
 
     def test_bioenv_no_side_effects(self):
         # Deep copies of both primary inputs.
@@ -130,7 +131,7 @@ class BIOENVTests(TestCase):
         # Make sure we haven't modified the primary input in some way (e.g.,
         # with scaling, type conversions, etc.).
         self.assertEqual(self.dm, dm_copy)
-        assert_frame_equal(self.df, df_copy)
+        assert_data_frame_almost_equal(self.df, df_copy)
 
     def test_bioenv_vegan_example(self):
         # The correlation coefficient in the first row of the
@@ -146,7 +147,7 @@ class BIOENVTests(TestCase):
         # same distances yields *very* similar results. Thus, the discrepancy
         # seems to stem from differences when computing ranks/ties.
         obs = bioenv(self.dm_vegan, self.df_vegan)
-        assert_frame_equal(obs, self.exp_results_vegan)
+        assert_data_frame_almost_equal(obs, self.exp_results_vegan)
 
     def test_bioenv_no_distance_matrix(self):
         with self.assertRaises(TypeError):
@@ -192,7 +193,7 @@ class BIOENVTests(TestCase):
         exp = pd.DataFrame([[0.0], [-1.0], [1.0]], index=['A', 'B', 'C'],
                            columns=['foo'])
         obs = _scale(df)
-        assert_frame_equal(obs, exp)
+        assert_data_frame_almost_equal(obs, exp)
 
     def test_scale_multiple_columns(self):
         # Floats and ints, including positives and negatives.
@@ -209,7 +210,7 @@ class BIOENVTests(TestCase):
                            index=['A', 'B', 'C', 'D'],
                            columns=['pH', 'Elevation', 'negatives'])
         obs = _scale(df)
-        assert_frame_equal(obs, exp)
+        assert_data_frame_almost_equal(obs, exp)
 
     def test_scale_no_variance(self):
         df = pd.DataFrame([[-7.0, -1.2], [6.2, -1.2], [2.9, -1.2]],
diff --git a/skbio/stats/distance/tests/test_mantel.py b/skbio/stats/distance/tests/test_mantel.py
index cff8f4c..f6c8173 100644
--- a/skbio/stats/distance/tests/test_mantel.py
+++ b/skbio/stats/distance/tests/test_mantel.py
@@ -7,18 +7,19 @@
 # ----------------------------------------------------------------------------
 
 from __future__ import absolute_import, division, print_function
+import six
+
 from unittest import TestCase, main
 
 import numpy as np
 import numpy.testing as npt
 import pandas as pd
-from pandas.util.testing import assert_frame_equal
 
 from skbio import DistanceMatrix
 from skbio.stats.distance import (DissimilarityMatrixError,
                                   DistanceMatrixError, mantel, pwmantel)
 from skbio.stats.distance._mantel import _order_dms
-from skbio.util import get_data_path
+from skbio.util import get_data_path, assert_data_frame_almost_equal
 
 
 class MantelTestData(TestCase):
@@ -355,31 +356,33 @@ class PairwiseMantelTests(MantelTestData):
 
         # input as DistanceMatrix instances
         obs = pwmantel(self.min_dms, alternative='greater')
-        assert_frame_equal(obs, self.exp_results_minimal)
+        assert_data_frame_almost_equal(obs, self.exp_results_minimal)
 
         np.random.seed(0)
 
         # input as array_like
         obs = pwmantel((self.minx, self.miny, self.minz),
                        alternative='greater')
-        assert_frame_equal(obs, self.exp_results_minimal)
+        assert_data_frame_almost_equal(obs, self.exp_results_minimal)
 
     def test_minimal_compatible_input_with_labels(self):
         np.random.seed(0)
 
         obs = pwmantel(self.min_dms, alternative='greater',
                        labels=('minx', 'miny', 'minz'))
-        assert_frame_equal(obs, self.exp_results_minimal_with_labels)
+        assert_data_frame_almost_equal(
+            obs,
+            self.exp_results_minimal_with_labels)
 
     def test_duplicate_dms(self):
         obs = pwmantel((self.minx_dm, self.minx_dm, self.minx_dm),
                        alternative='less')
-        assert_frame_equal(obs, self.exp_results_duplicate_dms)
+        assert_data_frame_almost_equal(obs, self.exp_results_duplicate_dms)
 
     def test_na_p_value(self):
         obs = pwmantel((self.miny_dm, self.minx_dm), method='spearman',
                        permutations=0)
-        assert_frame_equal(obs, self.exp_results_na_p_value)
+        assert_data_frame_almost_equal(obs, self.exp_results_na_p_value)
 
     def test_reordered_distance_matrices(self):
         # Matrices have matching IDs but they all have different ordering.
@@ -390,7 +393,9 @@ class PairwiseMantelTests(MantelTestData):
         np.random.seed(0)
 
         obs = pwmantel((x, y, z), alternative='greater')
-        assert_frame_equal(obs, self.exp_results_reordered_distance_matrices)
+        assert_data_frame_almost_equal(
+            obs,
+            self.exp_results_reordered_distance_matrices)
 
     def test_strict(self):
         # Matrices have some matching and nonmatching IDs, with different
@@ -403,7 +408,9 @@ class PairwiseMantelTests(MantelTestData):
 
         # strict=False should discard IDs that aren't found in both matrices
         obs = pwmantel((x, y, z), alternative='greater', strict=False)
-        assert_frame_equal(obs, self.exp_results_reordered_distance_matrices)
+        assert_data_frame_almost_equal(
+            obs,
+            self.exp_results_reordered_distance_matrices)
 
     def test_id_lookup(self):
         # Matrices have mismatched IDs but a lookup is provided.
@@ -425,7 +432,9 @@ class PairwiseMantelTests(MantelTestData):
 
         obs = pwmantel((x, y, z), alternative='greater', strict=False,
                        lookup=lookup)
-        assert_frame_equal(obs, self.exp_results_reordered_distance_matrices)
+        assert_data_frame_almost_equal(
+            obs,
+            self.exp_results_reordered_distance_matrices)
 
         # Make sure the inputs aren't modified.
         self.assertEqual(x, x_copy)
@@ -457,7 +466,7 @@ class PairwiseMantelTests(MantelTestData):
         np.random.seed(0)
 
         obs = pwmantel(dms)
-        assert_frame_equal(obs, self.exp_results_dm_dm2)
+        assert_data_frame_almost_equal(obs, self.exp_results_dm_dm2)
 
     def test_many_filepaths_as_input(self):
         dms = [
@@ -469,7 +478,7 @@ class PairwiseMantelTests(MantelTestData):
         np.random.seed(0)
 
         obs = pwmantel(dms)
-        assert_frame_equal(obs, self.exp_results_all_dms)
+        assert_data_frame_almost_equal(obs, self.exp_results_all_dms)
 
 
 class OrderDistanceMatricesTests(MantelTestData):
@@ -534,7 +543,7 @@ class OrderDistanceMatricesTests(MantelTestData):
         # for the first distance matrix.
         lookup = {'0': 'a', '2': 'c'}
 
-        with self.assertRaisesRegexp(KeyError, "first.*(x).*'1'\"$"):
+        with six.assertRaisesRegex(self, KeyError, "first.*(x).*'1'\"$"):
             _order_dms(self.minx_dm, self.miny_dm, lookup=lookup)
 
         # Mapping for 'bar' is missing. Should get an error while remapping IDs
@@ -543,7 +552,7 @@ class OrderDistanceMatricesTests(MantelTestData):
                   'foo': 'a', 'baz': 'c'}
         self.miny_dm.ids = ('foo', 'bar', 'baz')
 
-        with self.assertRaisesRegexp(KeyError, "second.*(y).*'bar'\"$"):
+        with six.assertRaisesRegex(self, KeyError, "second.*(y).*'bar'\"$"):
             _order_dms(self.minx_dm, self.miny_dm, lookup=lookup)
 
     def test_nonmatching_ids_strict_true(self):
diff --git a/skbio/stats/distance/tests/test_permanova.py b/skbio/stats/distance/tests/test_permanova.py
index 4c2f1a8..f15803e 100644
--- a/skbio/stats/distance/tests/test_permanova.py
+++ b/skbio/stats/distance/tests/test_permanova.py
@@ -13,12 +13,11 @@ from functools import partial
 from unittest import TestCase, main
 
 import numpy as np
-import numpy.testing as npt
 import pandas as pd
 from pandas.util.testing import assert_series_equal
 
 from skbio import DistanceMatrix
-from skbio.stats.distance import permanova, PERMANOVA
+from skbio.stats.distance import permanova
 
 
 class TestPERMANOVA(TestCase):
@@ -75,7 +74,8 @@ class TestPERMANOVA(TestCase):
         # inputs. Also ensure we get the same results if we run the method
         # using a grouping vector or a data frame with equivalent groupings.
         exp = pd.Series(index=self.exp_index,
-                        data=['PERMANOVA', 'pseudo-F', 4, 2, 2.0, 0.671, 999])
+                        data=['PERMANOVA', 'pseudo-F', 4, 2, 2.0, 0.671, 999],
+                        name='PERMANOVA results')
 
         for _ in range(2):
             np.random.seed(0)
@@ -89,21 +89,24 @@ class TestPERMANOVA(TestCase):
 
     def test_call_no_ties(self):
         exp = pd.Series(index=self.exp_index,
-                        data=['PERMANOVA', 'pseudo-F', 4, 2, 4.4, 0.332, 999])
+                        data=['PERMANOVA', 'pseudo-F', 4, 2, 4.4, 0.332, 999],
+                        name='PERMANOVA results')
         np.random.seed(0)
         obs = permanova(self.dm_no_ties, self.grouping_equal)
         self.assert_series_equal(obs, exp)
 
     def test_call_no_permutations(self):
         exp = pd.Series(index=self.exp_index,
-                        data=['PERMANOVA', 'pseudo-F', 4, 2, 4.4, np.nan, 0])
+                        data=['PERMANOVA', 'pseudo-F', 4, 2, 4.4, np.nan, 0],
+                        name='PERMANOVA results')
         obs = permanova(self.dm_no_ties, self.grouping_equal, permutations=0)
         self.assert_series_equal(obs, exp)
 
     def test_call_unequal_group_sizes(self):
-        exp = pd.Series(index=self.exp_index,
-                        data=['PERMANOVA', 'pseudo-F', 6, 3, 0.578848, 0.645,
-                              999])
+        exp = pd.Series(
+            index=self.exp_index,
+            data=['PERMANOVA', 'pseudo-F', 6, 3, 0.578848, 0.645, 999],
+            name='PERMANOVA results')
 
         np.random.seed(0)
         obs = permanova(self.dm_unequal, self.grouping_unequal)
@@ -114,87 +117,5 @@ class TestPERMANOVA(TestCase):
         self.assert_series_equal(obs, exp)
 
 
-class TestPERMANOVAClass(TestCase):
-    """All results were verified with R (vegan::adonis)."""
-
-    def setUp(self):
-        # Distance matrices with and without ties in the ranks, with 2 groups
-        # of equal size.
-        dm_ids = ['s1', 's2', 's3', 's4']
-        grouping_equal = ['Control', 'Control', 'Fast', 'Fast']
-        df = pd.read_csv(
-            StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n'
-                     's1,Control'), index_col=0)
-
-        self.dm_ties = DistanceMatrix([[0, 1, 1, 4],
-                                       [1, 0, 3, 2],
-                                       [1, 3, 0, 3],
-                                       [4, 2, 3, 0]], dm_ids)
-
-        self.dm_no_ties = DistanceMatrix([[0, 1, 5, 4],
-                                          [1, 0, 3, 2],
-                                          [5, 3, 0, 3],
-                                          [4, 2, 3, 0]], dm_ids)
-
-        # Test with 3 groups of unequal size.
-        grouping_unequal = ['Control', 'Treatment1', 'Treatment2',
-                            'Treatment1', 'Control', 'Control']
-
-        self.dm_unequal = DistanceMatrix(
-            [[0.0, 1.0, 0.1, 0.5678, 1.0, 1.0],
-             [1.0, 0.0, 0.002, 0.42, 0.998, 0.0],
-             [0.1, 0.002, 0.0, 1.0, 0.123, 1.0],
-             [0.5678, 0.42, 1.0, 0.0, 0.123, 0.43],
-             [1.0, 0.998, 0.123, 0.123, 0.0, 0.5],
-             [1.0, 0.0, 1.0, 0.43, 0.5, 0.0]],
-            ['s1', 's2', 's3', 's4', 's5', 's6'])
-
-        self.permanova_ties = PERMANOVA(self.dm_ties, grouping_equal)
-        self.permanova_no_ties = PERMANOVA(self.dm_no_ties, grouping_equal)
-        self.permanova_ties_df = PERMANOVA(self.dm_ties, df, column='Group')
-        self.permanova_unequal = PERMANOVA(self.dm_unequal, grouping_unequal)
-
-    def test_call_ties(self):
-        # Ensure we get the same results if we rerun the method on the same
-        # object. Also ensure we get the same results if we run the method
-        # using a grouping vector or a data frame with equivalent groupings.
-        for inst in self.permanova_ties, self.permanova_ties_df:
-            for trial in range(2):
-                np.random.seed(0)
-                obs = inst()
-                self.assertEqual(obs.sample_size, 4)
-                npt.assert_array_equal(obs.groups, ['Control', 'Fast'])
-                self.assertAlmostEqual(obs.statistic, 2.0)
-                self.assertAlmostEqual(obs.p_value, 0.671)
-                self.assertEqual(obs.permutations, 999)
-
-    def test_call_no_ties(self):
-        np.random.seed(0)
-        obs = self.permanova_no_ties()
-        self.assertEqual(obs.sample_size, 4)
-        npt.assert_array_equal(obs.groups, ['Control', 'Fast'])
-        self.assertAlmostEqual(obs.statistic, 4.4)
-        self.assertAlmostEqual(obs.p_value, 0.332)
-        self.assertEqual(obs.permutations, 999)
-
-    def test_call_no_permutations(self):
-        obs = self.permanova_no_ties(0)
-        self.assertEqual(obs.sample_size, 4)
-        npt.assert_array_equal(obs.groups, ['Control', 'Fast'])
-        self.assertAlmostEqual(obs.statistic, 4.4)
-        self.assertEqual(obs.p_value, None)
-        self.assertEqual(obs.permutations, 0)
-
-    def test_call_unequal_group_sizes(self):
-        np.random.seed(0)
-        obs = self.permanova_unequal()
-        self.assertEqual(obs.sample_size, 6)
-        npt.assert_array_equal(obs.groups,
-                               ['Control', 'Treatment1', 'Treatment2'])
-        self.assertAlmostEqual(obs.statistic, 0.578848, 6)
-        self.assertAlmostEqual(obs.p_value, 0.645)
-        self.assertEqual(obs.permutations, 999)
-
-
 if __name__ == '__main__':
     main()
diff --git a/skbio/stats/evolve/__init__.py b/skbio/stats/evolve/__init__.py
new file mode 100644
index 0000000..cbfb9cc
--- /dev/null
+++ b/skbio/stats/evolve/__init__.py
@@ -0,0 +1,41 @@
+"""
+Evolutionary statistics (:mod:`skbio.stats.evolve`)
+===================================================
+
+.. currentmodule:: skbio.stats.evolve
+
+This package contains statistics pertaining to phylogenies and evolution.
+
+Cophylogenetic methods
+----------------------
+
+These functions test for correlation between phylogenies or representations of
+evolutionary distance (for example, genetic distance matrices).
+
+Functions
+^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   hommola_cospeciation
+
+"""
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from skbio.util import TestRunner
+
+from ._hommola import hommola_cospeciation
+
+__all__ = ['hommola_cospeciation']
+
+test = TestRunner(__file__).test
diff --git a/skbio/stats/evolve/_hommola.py b/skbio/stats/evolve/_hommola.py
new file mode 100644
index 0000000..d491de8
--- /dev/null
+++ b/skbio/stats/evolve/_hommola.py
@@ -0,0 +1,268 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+from future.builtins import range
+
+import numpy as np
+from scipy.stats import pearsonr
+
+from skbio import DistanceMatrix
+from skbio.util._decorator import experimental
+
+
@experimental(as_of="0.4.0")
def hommola_cospeciation(host_dist, par_dist, interaction, permutations=999):
    """Perform the Hommola et al (2009) host/parasite cospeciation test.

    A Mantel-like permutation test described in [1]_ that correlates
    host-host and parasite-parasite distances across host-parasite
    interaction edges. Unlike a plain Mantel test it accepts
    many-to-many associations: a host linked to two parasites simply
    contributes two edges. Significance is assessed by permuting the
    host-symbiont interaction links.

    The null hypothesis is that hosts and parasites have evolved
    independently; rejecting it indicates correlated evolution, which
    is weaker than strict co-cladogenesis but more robust to
    phylogenetic uncertainty.

    Parameters
    ----------
    host_dist : 2-D array_like or DistanceMatrix
        Symmetric m x m matrix of pairwise distances between hosts.
    par_dist : 2-D array_like or DistanceMatrix
        Symmetric n x n matrix of pairwise distances between parasites.
    interaction : 2-D array_like, bool
        n x m binary matrix of parasite (rows) x host (columns)
        interactions. Row order must match `par_dist` and column order
        must match `host_dist`. Interactions are ``1``/``True``.
    permutations : int, optional
        Number of permutations used to compute the p-value. Must be
        greater than or equal to zero. If zero, significance is not
        computed and the p-value is ``np.nan``.

    Returns
    -------
    corr_coeff : float
        Pearson correlation coefficient of host : parasite association.
    p_value : float
        Significance of the association, computed via `permutations`
        with a one-sided (greater) alternative hypothesis.
    perm_stats : 1-D numpy.ndarray, float
        Correlation coefficients observed under permuted interactions;
        length equals `permutations`.

    See Also
    --------
    skbio.stats.distance.mantel
    scipy.stats.pearsonr

    Notes
    -----
    This code is loosely based on the original R code from [1]_.

    References
    ----------
    .. [1] Hommola K, Smith JE, Qiu Y, Gilks WR (2009) A Permutation Test of
       Host-Parasite Cospeciation. Molecular Biology and Evolution, 26,
       1457-1468.

    """
    host_dist = DistanceMatrix(host_dist)
    par_dist = DistanceMatrix(par_dist)
    interaction = np.asarray(interaction, dtype=bool)

    n_hosts = host_dist.shape[0]
    n_pars = par_dist.shape[0]

    # Validate inputs up front; messages form part of the error contract.
    if n_hosts < 3 or n_pars < 3:
        raise ValueError("Distance matrices must be a minimum of 3x3 in size.")
    if n_hosts != interaction.shape[1]:
        raise ValueError("Number of interaction matrix columns must match "
                         "number of hosts in `host_dist`.")
    if n_pars != interaction.shape[0]:
        raise ValueError("Number of interaction matrix rows must match "
                         "number of parasites in `par_dist`.")
    if permutations < 0:
        raise ValueError("Number of permutations must be greater than or "
                         "equal to zero.")
    if interaction.sum() < 3:
        raise ValueError("Must have at least 3 host-parasite interactions in "
                         "`interaction`.")

    # Each nonzero entry of `interaction` is one host-parasite edge.
    # Expand the edge list into all pairwise edge-vs-edge comparisons,
    # avoiding the nested for-loops of the original R implementation.
    par_edges, host_edges = np.nonzero(interaction)
    par_rows, par_cols = _gen_lists(par_edges)
    host_rows, host_cols = _gen_lists(host_edges)

    # Identity index vectors; these are shuffled in place during the
    # permutation phase below.
    par_idx = np.arange(n_pars)
    host_idx = np.arange(n_hosts)

    # Vector of pairwise distances for each pair of interaction edges.
    x = _get_dist(host_rows, host_cols, host_dist.data, host_idx)
    y = _get_dist(par_rows, par_cols, par_dist.data, par_idx)

    # Observed association between host and symbiont distances.
    corr_coeff = pearsonr(x, y)[0]

    perm_stats = np.empty(permutations)

    if permutations == 0 or np.isnan(corr_coeff):
        # Significance cannot (or should not) be assessed.
        perm_stats.fill(np.nan)
        return corr_coeff, np.nan, perm_stats

    for i in range(permutations):
        # Randomize which host/symbiont carries which label while keeping
        # the distribution of genetic distances intact. Shuffle order
        # (parasites first, then hosts) matches the original so results
        # are reproducible under a seeded RNG.
        np.random.shuffle(par_idx)
        np.random.shuffle(host_idx)

        # Pairwise distances under the shuffled labelling.
        y_perm = _get_dist(par_rows, par_cols, par_dist.data, par_idx)
        x_perm = _get_dist(host_rows, host_cols, host_dist.data, host_idx)

        perm_stats[i] = pearsonr(x_perm, y_perm)[0]

    # The observed statistic counts toward its own p-value (+1 / +1).
    p_value = ((perm_stats >= corr_coeff).sum() + 1) / (permutations + 1)

    return corr_coeff, p_value, perm_stats
+
+
+def _get_dist(k_labels, t_labels, dists, index):
+    """Subset a distance matrix using a set of (randomizable) index labels.
+
+    Parameters
+    ----------
+    k_labels : numpy.array
+        index labels specifying row-wise member of pairwise interaction
+    t_labels : numpy.array
+        index labels specifying column-wise member of pairwise interaction
+    dists : numpy.array
+        pairwise distance matrix
+    index : numpy.array of int
+        permutable indices for changing order in pairwise distance matrix
+
+    Returns
+    -------
+    vec : list of float
+        List of distances associated with host:parasite edges.
+
+    """
+    return dists[index[k_labels], index[t_labels]]
+
+
+def _gen_lists(labels):
+    """Generate matched lists of row and column index labels.
+
+    Shortcut function for generating matched lists of row and col index
+    labels for the set of pairwise comparisons specified by the list of those
+    indices recovered using ``np.nonzero(interaction)``.
+
+    Reproduces values of iterated indices from the nested for-loops contained
+    in ``get_dist`` function in original code from [1]_.
+
+    Parameters
+    ----------
+    labels : numpy.array
+        array containing the indices of nonzero elements in one dimension of an
+        interaction matrix
+
+    Returns
+    -------
+    k_labels : numpy.array
+        index labels specifying row-wise member of pairwise interaction
+    t_labels : numpy.array
+        index labels specifying column-wise member of pairwise interaction
+
+    References
+    ----------
+    .. [1] Hommola K, Smith JE, Qiu Y, Gilks WR (2009) A Permutation Test of
+       Host-Parasite Cospeciation. Molecular Biology and Evolution, 26,
+       1457-1468.
+
+    """
+    i_array, j_array = np.transpose(np.tri(len(labels)-1)).nonzero()
+    j_array = j_array + 1
+    return labels[i_array], labels[j_array]
diff --git a/skbio/draw/tests/__init__.py b/skbio/stats/evolve/tests/__init__.py
similarity index 84%
copy from skbio/draw/tests/__init__.py
copy to skbio/stats/evolve/tests/__init__.py
index c99682c..3fe3dc6 100644
--- a/skbio/draw/tests/__init__.py
+++ b/skbio/stats/evolve/tests/__init__.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -7,3 +5,5 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
diff --git a/skbio/stats/evolve/tests/test_hommola.py b/skbio/stats/evolve/tests/test_hommola.py
new file mode 100644
index 0000000..e8a264c
--- /dev/null
+++ b/skbio/stats/evolve/tests/test_hommola.py
@@ -0,0 +1,188 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+import unittest
+
+import numpy as np
+import numpy.testing as npt
+
+from skbio.stats.distance import mantel
+from skbio.stats.evolve import hommola_cospeciation
+from skbio.stats.evolve._hommola import _get_dist, _gen_lists
+
+
class HommolaCospeciationTests(unittest.TestCase):
    """Unit tests for hommola_cospeciation and its private helpers.

    Expected permutation statistics are tied to ``np.random.seed(1)`` and
    the exact order of RNG calls inside ``hommola_cospeciation``; changing
    either invalidates the fixtures below.
    """

    def setUp(self):
        # Test matrices, as presented in original paper by Hommola et al.
        self.hdist = np.array([[0, 3, 8, 8, 9], [3, 0, 7, 7, 8], [
            8, 7, 0, 6, 7], [8, 7, 6, 0, 3], [9, 8, 7, 3, 0]])
        self.pdist = np.array([[0, 5, 8, 8, 8], [5, 0, 7, 7, 7], [
            8, 7, 0, 4, 4], [8, 7, 4, 0, 2], [8, 7, 4, 2, 0]])
        self.interact = np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [
            0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 1, 1]])

        # Reduced-size host matrix for testing asymmetric interaction matrix
        self.hdist_4x4 = np.array([[0, 3, 8, 8], [3, 0, 7, 7], [8, 7, 0, 6],
                                  [8, 7, 6, 0]])
        self.interact_5x4 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
                                      [0, 0, 0, 1], [0, 0, 0, 1]])

        # One to one interaction matrix for comparing against Mantel output
        self.interact_1to1 = np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [
            0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]])

        # interaction matrix yielding non-significant results.
        # this matrix was picked because it will generate an r value that's
        # less than a standard deviation away from the mean of the normal
        # distribution of r vals
        self.interact_ns = np.array(
            [[0, 0, 0, 1, 0], [0, 0, 0, 0, 1], [1, 0, 0, 0, 0],
             [1, 0, 0, 0, 0], [0, 0, 0, 0, 1]])

        # minimal size matrices for sanity checks of inputs
        self.h_dist_3x3 = np.array([[0, 1, 2], [1, 0, 1], [2, 1, 0]])
        self.h_dist_2x2 = np.array([[0, 3], [3, 0]])
        self.p_dist_3x3 = np.array([[0, 3, 2], [3, 0, 1], [2, 1, 0]])
        self.interact_3x3 = np.array([[0, 1, 1], [1, 0, 1], [0, 0, 1]])
        self.interact_3x2 = np.array([[0, 1], [1, 0], [1, 1]])
        self.interact_2x3 = np.array([[0, 1, 1], [1, 0, 1]])
        self.interact_zero = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])

    def test_hommola_cospeciation_sig(self):
        """Significant association is detected on the paper's example data."""
        np.random.seed(1)

        obs_r, obs_p, obs_perm_stats = hommola_cospeciation(
            self.hdist, self.pdist, self.interact, 9)
        exp_p = .1
        exp_r = 0.83170965463247915
        exp_perm_stats = np.array([-0.14928122, 0.26299538, -0.21125858,
                                   0.24143838, 0.61557855, -0.24258293,
                                   0.09885203, 0.02858, 0.42742399])
        self.assertAlmostEqual(obs_p, exp_p)
        self.assertAlmostEqual(obs_r, exp_r)

        npt.assert_allclose(obs_perm_stats, exp_perm_stats)

    def test_hommola_cospeciation_asymmetric(self):
        """Differing host/parasite counts (4 hosts, 5 parasites) are handled."""
        np.random.seed(1)

        obs_r, obs_p, obs_perm_stats = hommola_cospeciation(
            self.hdist_4x4, self.pdist, self.interact_5x4, 9)
        exp_p = 0.2
        exp_r = 0.85732140997411233
        exp_perm_stats = np.array([-0.315244162496, -0.039405520312,
                                   0.093429386594, -0.387835875941,
                                   0.183711730709,  0.056057631956,
                                   0.945732487487,  0.056057631956,
                                   -0.020412414523])
        self.assertAlmostEqual(obs_p, exp_p)
        self.assertAlmostEqual(obs_r, exp_r)

        npt.assert_allclose(obs_perm_stats, exp_perm_stats)

    def test_hommola_cospeciation_no_sig(self):
        """A deliberately uncorrelated interaction matrix is non-significant."""
        np.random.seed(1)

        obs_r, obs_p, obs_perm_stats = hommola_cospeciation(
            self.hdist, self.pdist, self.interact_ns, 9)
        exp_p = .6
        exp_r = -0.013679391379114569
        exp_perm_stats = np.array([-0.22216543, -0.14836061, -0.04434843,
                                   0.1478281, -0.29105645, 0.56395839,
                                   0.47304992, 0.79125657, 0.06804138])
        self.assertAlmostEqual(obs_p, exp_p)
        self.assertAlmostEqual(obs_r, exp_r)
        npt.assert_allclose(obs_perm_stats, exp_perm_stats)

    def test_hommola_vs_mantel(self):
        """With a 1:1 interaction matrix, the statistic reduces to Mantel's r."""
        # we don't compare p-values because the two methods use different
        # permutation strategies
        r_mantel, p_mantel, _ = mantel(
            self.hdist, self.pdist, method='pearson', permutations=0,
            alternative='greater'
        )
        r_hommola, p_hommola, _ = hommola_cospeciation(
            self.hdist, self.pdist, self.interact_1to1, permutations=0
        )

        self.assertAlmostEqual(r_hommola, r_mantel)
        npt.assert_equal(p_hommola, p_mantel)

    def test_zero_permutations(self):
        """permutations=0 yields a nan p-value and empty permutation stats."""
        obs_r, obs_p, obs_perm_stats = hommola_cospeciation(
            self.hdist, self.pdist, self.interact, 0)

        exp_p = np.nan
        exp_r = 0.83170965463247915
        exp_perm_stats = np.array([])

        npt.assert_equal(obs_p, exp_p)
        self.assertAlmostEqual(obs_r, exp_r)
        npt.assert_equal(obs_perm_stats, exp_perm_stats)

    def test_get_dist(self):
        """_get_dist gathers distances through a permuted index vector."""
        labels = np.array([0, 1, 1, 2, 3])
        k_labels, t_labels = _gen_lists(labels)
        dists = np.array([[0, 2, 6, 3], [2, 0, 5, 4], [6, 5, 0, 7],
                          [3, 4, 7, 0]])
        index = np.array([2, 3, 1, 0])

        expected_vec = np.array([7, 7, 5, 6, 0, 4, 3, 4, 3, 2])
        actual_vec = _get_dist(k_labels, t_labels, dists, index)

        npt.assert_allclose(actual_vec, expected_vec)

    def test_gen_lists(self):
        """_gen_lists enumerates all i<j label pairs in row-major order."""
        exp_pars_k_labels = np.array([0, 0, 0, 0, 0, 1, 1, 1,
                                      1, 2, 2, 2, 3, 3, 4])
        exp_pars_t_labels = np.array([1, 2, 3, 4, 4, 2, 3, 4,
                                      4, 3, 4, 4, 4, 4, 4])
        exp_host_k_labels = np.array([0, 0, 0, 0, 0, 1, 1, 1,
                                      1, 2, 2, 2, 3, 3, 3])
        exp_host_t_labels = np.array([1, 2, 3, 3, 4, 2, 3, 3,
                                      4, 3, 3, 4, 3, 4, 4])

        pars, hosts = np.nonzero(self.interact)

        obs_pars_k_labels, obs_pars_t_labels = _gen_lists(pars)
        obs_hosts_k_labels, obs_hosts_t_labels = _gen_lists(hosts)

        npt.assert_allclose(exp_pars_k_labels, obs_pars_k_labels)
        npt.assert_allclose(exp_pars_t_labels, obs_pars_t_labels)
        npt.assert_allclose(exp_host_k_labels, obs_hosts_k_labels)
        npt.assert_allclose(exp_host_t_labels, obs_hosts_t_labels)

    def test_dm_too_small(self):
        """Distance matrices smaller than 3x3 are rejected."""
        with self.assertRaises(ValueError):
            hommola_cospeciation(self.h_dist_2x2, self.p_dist_3x3,
                                 self.interact_3x3)

    def test_host_interaction_not_equal(self):
        """Interaction column count must match the host matrix size."""
        with self.assertRaises(ValueError):
            hommola_cospeciation(self.h_dist_3x3, self.p_dist_3x3,
                                 self.interact_2x3)

    def test_par_interaction_not_equal(self):
        """Interaction row count must match the parasite matrix size."""
        with self.assertRaises(ValueError):
            hommola_cospeciation(self.h_dist_3x3, self.p_dist_3x3,
                                 self.interact_3x2)

    def test_interaction_too_few(self):
        """Fewer than 3 host-parasite interactions is an error."""
        with self.assertRaises(ValueError):
            hommola_cospeciation(self.h_dist_3x3, self.p_dist_3x3,
                                 self.interact_zero)

    def test_permutations_too_few(self):
        """A negative permutation count is an error."""
        with self.assertRaises(ValueError):
            hommola_cospeciation(self.h_dist_3x3, self.p_dist_3x3,
                                 self.interact_3x3, -1)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/skbio/stats/gradient.py b/skbio/stats/gradient.py
index 9b0bd9c..2dd1a17 100644
--- a/skbio/stats/gradient.py
+++ b/skbio/stats/gradient.py
@@ -85,13 +85,13 @@ Control
 {'avg': 3.007022633956606}
 """
 
-# -----------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
 # Distributed under the terms of the Modified BSD License.
 #
 # The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
 
 from __future__ import absolute_import, division, print_function
 
@@ -100,9 +100,11 @@ from collections import defaultdict
 from numbers import Integral
 
 import numpy as np
-from natsort import natsorted
+from natsort import realsorted
 from scipy.stats import f_oneway
 
+from skbio.util._decorator import experimental
+
 
 def _weight_by_vector(trajectories, w_vector):
     r"""weights the values of `trajectories` given a weighting vector
@@ -218,6 +220,7 @@ class GroupResults(object):
 
     """
 
+    @experimental(as_of="0.4.0")
     def __init__(self, name, trajectory, mean, info, message):
         self.name = name
         self.trajectory = trajectory
@@ -225,6 +228,7 @@ class GroupResults(object):
         self.info = info
         self.message = message
 
+    @experimental(as_of="0.4.0")
     def to_files(self, out_f, raw_f):
         r"""Save the trajectory analysis results for a category group to files
         in text format.
@@ -270,12 +274,14 @@ class CategoryResults(object):
 
     """
 
+    @experimental(as_of="0.4.0")
     def __init__(self, category, probability, groups, message):
         self.category = category
         self.probability = probability
         self.groups = groups
         self.message = message
 
+    @experimental(as_of="0.4.0")
     def to_files(self, out_f, raw_f):
         r"""Save the trajectory analysis results for a category to files in
         text format.
@@ -316,11 +322,13 @@ class GradientANOVAResults(object):
 
     """
 
+    @experimental(as_of="0.4.0")
     def __init__(self, algorithm, weighted, categories):
         self.algorithm = algorithm
         self.weighted = weighted
         self.categories = categories
 
+    @experimental(as_of="0.4.0")
     def to_files(self, out_f, raw_f):
         r"""Save the trajectory analysis results to files in text format.
 
@@ -392,6 +400,7 @@ class GradientANOVA(object):
     # Should be defined by the derived classes
     _alg_name = None
 
+    @experimental(as_of="0.4.0")
     def __init__(self, coords, prop_expl, metadata_map,
                  trajectory_categories=None, sort_category=None, axes=3,
                  weighted=False):
@@ -448,6 +457,7 @@ class GradientANOVA(object):
         # Initialize the message buffer
         self._message_buffer = []
 
+    @experimental(as_of="0.4.0")
     def get_trajectories(self):
         r"""Compute the trajectories for each group in each category and run
         ANOVA over the results to test group independence.
@@ -530,7 +540,7 @@ class GradientANOVA(object):
             # Group samples by category
             gb = self._metadata_map.groupby(cat)
             for g, df in gb:
-                self._groups[cat][g] = natsorted(df.index, key=sort_val)
+                self._groups[cat][g] = realsorted(df.index, key=sort_val)
 
     def _get_group_trajectories(self, group_name, sids):
         r"""Compute the trajectory results for `group_name` containing the
@@ -781,6 +791,7 @@ class WindowDifferenceGradientANOVA(GradientANOVA):
 
     _alg_name = 'wdiff'
 
+    @experimental(as_of="0.4.0")
     def __init__(self, coords, prop_expl, metadata_map, window_size, **kwargs):
         super(WindowDifferenceGradientANOVA, self).__init__(coords, prop_expl,
                                                             metadata_map,
diff --git a/skbio/stats/ordination/__init__.py b/skbio/stats/ordination/__init__.py
index 20ffa79..2231946 100644
--- a/skbio/stats/ordination/__init__.py
+++ b/skbio/stats/ordination/__init__.py
@@ -88,7 +88,7 @@ Exploring the results we see that the first three axes explain about
 80% of all the variance.
 
 >>> sc_2 = ordination_result.scores(scaling=2)
->>> print sc_2.proportion_explained
+>>> print(sc_2.proportion_explained)
 [ 0.46691091  0.23832652  0.10054837  0.10493671  0.04480535  0.02974698
   0.01263112  0.00156168  0.00053235]
 
@@ -99,6 +99,7 @@ References
    Amsterdam.
 
 """
+
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -107,7 +108,9 @@ References
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from numpy.testing import Tester
+from __future__ import absolute_import, division, print_function
+
+from skbio.util import TestRunner
 
 from ._correspondence_analysis import CA
 from ._redundancy_analysis import RDA
@@ -120,4 +123,4 @@ from ._utils import (mean_and_std, scale, svd_rank, corr,
 __all__ = ['CA', 'RDA', 'CCA', 'PCoA', 'OrdinationResults', 'mean_and_std',
            'scale', 'svd_rank', 'corr', 'assert_ordination_results_equal']
 
-test = Tester().test
+test = TestRunner(__file__).test
diff --git a/skbio/stats/ordination/_base.py b/skbio/stats/ordination/_base.py
index 5479f40..a669a1f 100644
--- a/skbio/stats/ordination/_base.py
+++ b/skbio/stats/ordination/_base.py
@@ -9,7 +9,6 @@
 from __future__ import absolute_import, division, print_function
 from future.builtins import zip
 
-import warnings
 from functools import partial
 
 import numpy as np
@@ -21,6 +20,7 @@ from IPython.core.display import Image, SVG
 
 from skbio._base import SkbioObject
 from skbio.stats._misc import _pprint_strs
+from skbio.util._decorator import experimental
 
 # avoid flake8 unused import error
 Axes3D
@@ -58,6 +58,7 @@ class OrdinationResults(SkbioObject):
     """
     default_write_format = 'ordination'
 
+    @experimental(as_of="0.4.0")
     def __init__(self, eigvals, species=None, site=None, biplot=None,
                  site_constraints=None, proportion_explained=None,
                  species_ids=None, site_ids=None):
@@ -70,79 +71,7 @@ class OrdinationResults(SkbioObject):
         self.species_ids = species_ids
         self.site_ids = site_ids
 
-    @classmethod
-    def from_file(cls, ord_res_f):
-        """Load ordination results from text file.
-
-        .. note:: Deprecated in scikit-bio 0.2.0-dev
-           ``from_file`` will be removed in scikit-bio 0.3.0. It is replaced by
-           ``read``, which is a more general method for deserializing
-           ordination results. ``read`` supports multiple file formats,
-           automatic file format detection, etc. by taking advantage of
-           scikit-bio's I/O registry system. See :mod:`skbio.io` for more
-           details.
-
-        Creates an ``OrdinationResults`` instance from a ``ordination``
-        formatted file. See :mod:`skbio.io.ordination` for the format
-        specification.
-
-        Parameters
-        ----------
-        ord_res_f: filepath or filehandle
-            File to read from.
-
-        Returns
-        -------
-        OrdinationResults
-            Instance of type `cls` containing the parsed contents of
-            `ord_res_f`.
-
-        Raises
-        ------
-        OrdinationFormatError
-            If the format of the file is not valid, or if the shapes of the
-            different sections of the file are not consistent.
-
-        See Also
-        --------
-        read
-
-        """
-        warnings.warn(
-            "OrdinationResults.from_file is deprecated and will be removed in "
-            "scikit-bio 0.3.0. Please update your code to use "
-            "OrdinationResults.read.", DeprecationWarning)
-        return cls.read(ord_res_f, format='ordination')
-
-    def to_file(self, out_f):
-        """Save ordination results to file in text format.
-
-        .. note:: Deprecated in scikit-bio 0.2.0-dev
-           ``to_file`` will be removed in scikit-bio 0.3.0. It is replaced by
-           ``write``, which is a more general method for serializing ordination
-           results. ``write`` supports multiple file formats by taking
-           advantage of scikit-bio's I/O registry system. See :mod:`skbio.io`
-           for more details.
-
-        Serializes ordination results as an ``ordination`` formatted file. See
-        :mod:`skbio.io.ordination` for the format specification.
-
-        Parameters
-        ----------
-        out_f : filepath or filehandle
-            File to write to.
-
-        See Also
-        --------
-        write
-
-        """
-        warnings.warn(
-            "OrdinationResults.to_file is deprecated and will be removed in "
-            "scikit-bio 0.3.0. Please update your code to use "
-            "OrdinationResults.write.", DeprecationWarning)
-        self.write(out_f, format='ordination')
-
+    @experimental(as_of="0.4.0")
     def __str__(self):
         """Return a string representation of the ordination results.
 
@@ -156,8 +85,6 @@ class OrdinationResults(SkbioObject):
         str
             String representation of the ordination results.
 
-        .. shownumpydoc
-
         """
         lines = ['Ordination results:']
 
@@ -180,6 +107,7 @@ class OrdinationResults(SkbioObject):
 
         return '\n'.join(lines)
 
+    @experimental(as_of="0.4.0")
     def plot(self, df=None, column=None, axes=(0, 1, 2), axis_labels=None,
              title='', cmap=None, s=20):
         """Create a 3-D scatterplot of ordination results colored by metadata.
@@ -442,11 +370,13 @@ class OrdinationResults(SkbioObject):
     # directly (since otherwise the client dictates which one it shows by
     # default)
     @property
+    @experimental(as_of="0.4.0")
     def png(self):
         """Display basic 3-D scatterplot in IPython Notebook as PNG."""
         return Image(self._repr_png_(), embed=True)
 
     @property
+    @experimental(as_of="0.4.0")
     def svg(self):
         """Display basic 3-D scatterplot in IPython Notebook as SVG."""
         return SVG(self._repr_svg_())
diff --git a/skbio/stats/ordination/_canonical_correspondence_analysis.py b/skbio/stats/ordination/_canonical_correspondence_analysis.py
index c51bec2..3cc573a 100644
--- a/skbio/stats/ordination/_canonical_correspondence_analysis.py
+++ b/skbio/stats/ordination/_canonical_correspondence_analysis.py
@@ -12,6 +12,7 @@ import numpy as np
 
 from ._base import Ordination, OrdinationResults
 from ._utils import corr, svd_rank, scale
+from skbio.util._decorator import experimental
 
 
 class CCA(Ordination):
@@ -89,6 +90,7 @@ class CCA(Ordination):
     short_method_name = 'CCA'
     long_method_name = 'Canonical Correspondence Analysis'
 
+    @experimental(as_of="0.4.0")
     def __init__(self, Y, X, site_ids, species_ids):
         self.Y = np.asarray(Y, dtype=np.float64)
         self.X = np.asarray(X, dtype=np.float64)
@@ -171,6 +173,7 @@ class CCA(Ordination):
 
         self.eigenvalues = np.r_[s, s_res]**2
 
+    @experimental(as_of="0.4.0")
     def scores(self, scaling):
         r"""Compute site and species scores for different scalings.
 
diff --git a/skbio/stats/ordination/_correspondence_analysis.py b/skbio/stats/ordination/_correspondence_analysis.py
index 7919c3c..18e55de 100644
--- a/skbio/stats/ordination/_correspondence_analysis.py
+++ b/skbio/stats/ordination/_correspondence_analysis.py
@@ -12,6 +12,7 @@ import numpy as np
 
 from ._base import Ordination, OrdinationResults
 from ._utils import svd_rank
+from skbio.util._decorator import experimental
 
 
 class CA(Ordination):
@@ -55,6 +56,7 @@ class CA(Ordination):
     short_method_name = 'CA'
     long_method_name = 'Canonical Analysis'
 
+    @experimental(as_of="0.4.0")
     def __init__(self, X, row_ids, column_ids):
         self.X = np.asarray(X, dtype=np.float64)
         self._ca()
@@ -94,6 +96,7 @@ class CA(Ordination):
         self.W = W[:rank]
         self.U = Ut[:rank].T
 
+    @experimental(as_of="0.4.0")
     def scores(self, scaling):
         r"""Compute site and species scores for different scalings.
 
diff --git a/skbio/stats/ordination/_principal_coordinate_analysis.py b/skbio/stats/ordination/_principal_coordinate_analysis.py
index 7af3640..2c3092c 100644
--- a/skbio/stats/ordination/_principal_coordinate_analysis.py
+++ b/skbio/stats/ordination/_principal_coordinate_analysis.py
@@ -14,6 +14,7 @@ import numpy as np
 
 from skbio.stats.distance import DistanceMatrix
 from ._base import Ordination, OrdinationResults
+from skbio.util._decorator import experimental
 
 # - In cogent, after computing eigenvalues/vectors, the imaginary part
 #   is dropped, if any. We know for a fact that the eigenvalues are
@@ -66,6 +67,7 @@ class PCoA(Ordination):
     short_method_name = 'PCoA'
     long_method_name = 'Principal Coordinate Analysis'
 
+    @experimental(as_of="0.4.0")
     def __init__(self, distance_matrix):
         if isinstance(distance_matrix, DistanceMatrix):
             self.dm = np.asarray(distance_matrix.data, dtype=np.float64)
@@ -110,6 +112,7 @@ class PCoA(Ordination):
         self.eigvals = eigvals[idxs_descending]
         self.eigvecs = eigvecs[:, idxs_descending]
 
+    @experimental(as_of="0.4.0")
     def scores(self):
         """Compute coordinates in transformed space.
 
diff --git a/skbio/stats/ordination/_redundancy_analysis.py b/skbio/stats/ordination/_redundancy_analysis.py
index 4c75513..aa85c5f 100644
--- a/skbio/stats/ordination/_redundancy_analysis.py
+++ b/skbio/stats/ordination/_redundancy_analysis.py
@@ -12,6 +12,7 @@ import numpy as np
 
 from ._base import Ordination, OrdinationResults
 from ._utils import corr, svd_rank, scale
+from skbio.util._decorator import experimental
 
 
 class RDA(Ordination):
@@ -58,6 +59,7 @@ class RDA(Ordination):
     short_method_name = 'RDA'
     long_method_name = 'Redundancy Analysis'
 
+    @experimental(as_of="0.4.0")
     def __init__(self, Y, X, site_ids, species_ids, scale_Y=False):
         self.Y = np.asarray(Y, dtype=np.float64)
         self.X = np.asarray(X, dtype=np.float64)
@@ -156,6 +158,7 @@ class RDA(Ordination):
 
         self.eigenvalues = np.r_[s[:rank], s_res[:rank_res]]
 
+    @experimental(as_of="0.4.0")
     def scores(self, scaling):
         """Compute site, species and biplot scores for different scalings.
 
diff --git a/skbio/stats/ordination/_utils.py b/skbio/stats/ordination/_utils.py
index 16d92a2..3cfd950 100644
--- a/skbio/stats/ordination/_utils.py
+++ b/skbio/stats/ordination/_utils.py
@@ -11,7 +11,10 @@ from __future__ import absolute_import, division, print_function
 import numpy as np
 import numpy.testing as npt
 
+from skbio.util._decorator import experimental
 
+
+@experimental(as_of="0.4.0")
 def mean_and_std(a, axis=None, weights=None, with_mean=True, with_std=True,
                  ddof=0):
     """Compute the weighted average and standard deviation along the
@@ -78,6 +81,7 @@ def mean_and_std(a, axis=None, weights=None, with_mean=True, with_std=True,
     return avg, std
 
 
+@experimental(as_of="0.4.0")
 def scale(a, weights=None, with_mean=True, with_std=True, ddof=0, copy=True):
     """Scale array by columns to have weighted average 0 and standard
     deviation 1.
@@ -124,6 +128,7 @@ def scale(a, weights=None, with_mean=True, with_std=True, ddof=0, copy=True):
     return a
 
 
+@experimental(as_of="0.4.0")
 def svd_rank(M_shape, S, tol=None):
     """Matrix rank of `M` given its singular values `S`.
 
@@ -135,6 +140,7 @@ def svd_rank(M_shape, S, tol=None):
     return np.sum(S > tol)
 
 
+@experimental(as_of="0.4.0")
 def corr(x, y=None):
     """Computes correlation between columns of `x`, or `x` and `y`.
 
@@ -173,6 +179,7 @@ def corr(x, y=None):
     return x.T.dot(y) / x.shape[0]
 
 
+@experimental(as_of="0.4.0")
 def assert_ordination_results_equal(left, right):
     """Assert that ordination results objects are equal.
 
diff --git a/skbio/stats/ordination/tests/__init__.py b/skbio/stats/ordination/tests/__init__.py
index 0bf0c55..3fe3dc6 100644
--- a/skbio/stats/ordination/tests/__init__.py
+++ b/skbio/stats/ordination/tests/__init__.py
@@ -5,3 +5,5 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
diff --git a/skbio/stats/ordination/tests/test_ordination.py b/skbio/stats/ordination/tests/test_ordination.py
index d377a56..2a401d3 100644
--- a/skbio/stats/ordination/tests/test_ordination.py
+++ b/skbio/stats/ordination/tests/test_ordination.py
@@ -7,7 +7,8 @@
 # ----------------------------------------------------------------------------
 
 from __future__ import absolute_import, division, print_function
-from six import binary_type, text_type, StringIO
+import six
+from six import binary_type, text_type
 
 import warnings
 import unittest
@@ -18,7 +19,7 @@ import numpy as np
 import numpy.testing as npt
 import pandas as pd
 from IPython.core.display import Image, SVG
-from nose.tools import assert_is_instance, assert_raises_regexp, assert_true
+from nose.tools import assert_is_instance, assert_true
 from scipy.spatial.distance import pdist
 
 from skbio import DistanceMatrix
@@ -662,16 +663,6 @@ class TestOrdinationResults(unittest.TestCase):
         self.min_ord_results = OrdinationResults(eigvals=eigvals, site=site,
                                                  site_ids=['A', 'B', 'C', 'D'])
 
-    def test_deprecated_io(self):
-        fh = StringIO()
-        npt.assert_warns(DeprecationWarning, self.ordination_results.to_file,
-                         fh)
-        fh.seek(0)
-        deserialized = npt.assert_warns(DeprecationWarning,
-                                        OrdinationResults.from_file, fh)
-        assert_ordination_results_equal(deserialized, self.ordination_results)
-        self.assertTrue(type(deserialized) == OrdinationResults)
-
     def test_str(self):
         exp = ("Ordination results:\n"
                "\tEigvals: 2\n"
@@ -747,7 +738,7 @@ class TestOrdinationResults(unittest.TestCase):
         self.check_basic_figure_sanity(fig, 1, 'a title', True, '2', '0', '1')
 
     def test_plot_with_invalid_axis_labels(self):
-        with assert_raises_regexp(ValueError, 'axis_labels.*4'):
+        with six.assertRaisesRegex(self, ValueError, 'axis_labels.*4'):
             self.min_ord_results.plot(axes=[2, 0, 1],
                                       axis_labels=('a', 'b', 'c', 'd'))
 
@@ -759,27 +750,27 @@ class TestOrdinationResults(unittest.TestCase):
 
     def test_validate_plot_axes_invalid_input(self):
         # not enough dimensions
-        with assert_raises_regexp(ValueError, '2 dimension\(s\)'):
+        with six.assertRaisesRegex(self, ValueError, '2 dimension\(s\)'):
             self.min_ord_results._validate_plot_axes(
                 np.asarray([[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]]), (0, 1, 2))
 
         coord_matrix = self.min_ord_results.site.T
 
         # wrong number of axes
-        with assert_raises_regexp(ValueError, 'exactly three.*found 0'):
+        with six.assertRaisesRegex(self, ValueError, 'exactly three.*found 0'):
             self.min_ord_results._validate_plot_axes(coord_matrix, [])
-        with assert_raises_regexp(ValueError, 'exactly three.*found 4'):
+        with six.assertRaisesRegex(self, ValueError, 'exactly three.*found 4'):
             self.min_ord_results._validate_plot_axes(coord_matrix,
                                                      (0, 1, 2, 3))
 
         # duplicate axes
-        with assert_raises_regexp(ValueError, 'must be unique'):
+        with six.assertRaisesRegex(self, ValueError, 'must be unique'):
             self.min_ord_results._validate_plot_axes(coord_matrix, (0, 1, 0))
 
         # out of range axes
-        with assert_raises_regexp(ValueError, 'axes\[1\].*3'):
+        with six.assertRaisesRegex(self, ValueError, 'axes\[1\].*3'):
             self.min_ord_results._validate_plot_axes(coord_matrix, (0, -1, 2))
-        with assert_raises_regexp(ValueError, 'axes\[2\].*3'):
+        with six.assertRaisesRegex(self, ValueError, 'axes\[2\].*3'):
             self.min_ord_results._validate_plot_axes(coord_matrix, (0, 2, 3))
 
     def test_get_plot_point_colors_invalid_input(self):
@@ -794,17 +785,17 @@ class TestOrdinationResults(unittest.TestCase):
                                                         ['B', 'C'], 'jet')
 
         # column not in df
-        with assert_raises_regexp(ValueError, 'missingcol'):
+        with six.assertRaisesRegex(self, ValueError, 'missingcol'):
             self.min_ord_results._get_plot_point_colors(self.df, 'missingcol',
                                                         ['B', 'C'], 'jet')
 
         # id not in df
-        with assert_raises_regexp(ValueError, 'numeric'):
+        with six.assertRaisesRegex(self, ValueError, 'numeric'):
             self.min_ord_results._get_plot_point_colors(
                 self.df, 'numeric', ['B', 'C', 'missingid', 'A'], 'jet')
 
         # missing data in df
-        with assert_raises_regexp(ValueError, 'nancolumn'):
+        with six.assertRaisesRegex(self, ValueError, 'nancolumn'):
             self.min_ord_results._get_plot_point_colors(self.df, 'nancolumn',
                                                         ['B', 'C', 'A'], 'jet')
 
diff --git a/skbio/stats/power.py b/skbio/stats/power.py
index 850d633..5151800 100644
--- a/skbio/stats/power.py
+++ b/skbio/stats/power.py
@@ -36,8 +36,8 @@ of subsamples.
 
 Sampling may be handled in two ways. For any set of samples, we may simply
 choose to draw :math:`n` observations at random for each sample. Alternatively,
-if metadata is avalaible, samples can be matched based on a set of control
-categories so that paired samples are drawn at random from the set of avaliable
+if metadata is available, samples can be matched based on a set of control
+categories so that paired samples are drawn at random from the set of available
 matches.
 
 Functions
@@ -49,8 +49,8 @@ Functions
     subsample_power
     subsample_paired_power
     confidence_bound
-    bootstrap_power_curve
     paired_subsamples
+    bootstrap_power_curve
 
 Examples
 --------
@@ -96,7 +96,6 @@ estimate for the critical value of 0.01, and a critical value of 0.001.
 >>> from skbio.stats.power import subsample_power
 >>> pwr_100, counts_100 = subsample_power(test=f,
 ...                                       samples=samples,
-...                                       min_observations=3,
 ...                                       max_counts=10,
 ...                                       min_counts=3,
 ...                                       counts_interval=1,
@@ -104,7 +103,6 @@ estimate for the critical value of 0.01, and a critical value of 0.001.
 ...                                       alpha_pwr=0.1)
 >>> pwr_010, counts_010 = subsample_power(test=f,
 ...                                       samples=samples,
-...                                       min_observations=3,
 ...                                       max_counts=10,
 ...                                       min_counts=3,
 ...                                       counts_interval=1,
@@ -112,7 +110,6 @@ estimate for the critical value of 0.01, and a critical value of 0.001.
 ...                                       alpha_pwr=0.01)
 >>> pwr_001, counts_001 = subsample_power(test=f,
 ...                                       samples=samples,
-...                                       min_observations=3,
 ...                                       max_counts=10,
 ...                                       min_counts=3,
 ...                                       counts_interval=1,
@@ -121,11 +118,11 @@ estimate for the critical value of 0.01, and a critical value of 0.001.
 >>> counts_100
 array([3, 4, 5, 6, 7, 8, 9])
 >>> pwr_100.mean(0)
-array([ 0.466 ,  0.827 ,  0.936 ,  0.9852,  0.998 ,  1.    ,  1.    ])
+array([ 0.4716,  0.8226,  0.9424,  0.986 ,  0.9988,  1.    ,  1.    ])
 >>> pwr_010.mean(0)
-array([ 0.0468,  0.2394,  0.5298,  0.8184,  0.951 ,  0.981 ,  0.9982])
+array([ 0.0492,  0.2368,  0.5462,  0.823 ,  0.9474,  0.9828,  0.9982])
 >>> pwr_001.mean(0)
-array([ 0.003 ,  0.0176,  0.1212,  0.3428,  0.5892,  0.8256,  0.9566])
+array([ 0.0028,  0.0174,  0.1262,  0.342 ,  0.5928,  0.8256,  0.9594])
 
 Based on this power estimate, as we increase our confidence that we have not
 committed a type I error and identified a false positive, the number of samples
@@ -133,32 +130,40 @@ we need to be confident that we have not committed a type II error increases.
 
 """
 
-# -----------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
 # Distributed under the terms of the Modified BSD License.
 #
 # The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
 
 from __future__ import absolute_import, division, print_function
 from future.utils import viewitems
 from future.builtins import range
 
+import collections
+import copy
+
 import numpy as np
 import scipy.stats
+import six
+
+from skbio.util._decorator import experimental, deprecated
 
 
-def subsample_power(test, samples, draw_mode='ind', alpha_pwr=0.05,
-                    min_observations=20, max_counts=50, counts_interval=10,
-                    min_counts=None, num_iter=500, num_runs=10):
+@experimental(as_of="0.4.0")
+def subsample_power(test, samples, draw_mode='ind', alpha_pwr=0.05, ratio=None,
+                    max_counts=50, counts_interval=10, min_counts=None,
+                    num_iter=500, num_runs=10):
     r"""Subsamples data to iteratively calculate power
 
     Parameters
     ----------
     test : function
         The statistical test which accepts a list of arrays of values
-        (sample ids or numeric values) and returns a p value.
+        (sample ids or numeric values) and returns a p value or one-dimensional
+        array of p values.
     samples : array_like
         `samples` can be a list of lists or a list of arrays where each
         sublist or row in the array corresponds to a sampled group.
@@ -173,28 +178,34 @@ def subsample_power(test, samples, draw_mode='ind', alpha_pwr=0.05,
         "ind" mode should be used.
     alpha_pwr : float, optional
         The critical value used to calculate the power.
-    min_observations : unsigned int, optional
-        The minimum number of observations in any sample to
-        perform power analysis. Note that this is not the same as the minimum
-        number of samples drawn per group.
-    max_counts : unsigned int, optional
-        The maximum number of samples per group to draw for
-        effect size calculation.
-    counts_interval : unsigned int, optional
+    ratio : 1-D array, optional
+        The fraction of the sample counts which should be
+        assigned to each group. If this is a 1-D array, it must be the same
+        length as `samples`. If no value is supplied (`ratio` is None),
+        then an equal number of observations will be drawn for each sample. In
+        `matched` mode, this will be set to one.
+    max_counts : positive int, optional
+        The maximum number of samples per group to draw for effect size
+        calculation.
+    counts_interval : positive int, optional
         The difference between each subsampling count.
-    min_counts : unsigned int, optional
+    min_counts : positive int, optional
         How many samples should be drawn for the smallest
         subsample. If this is None, the `counts_interval` will be used.
-    num_iter : unsigned int, optional
+    num_iter : positive int, optional
         The number of p-values to generate for each point
         on the curve.
-    num_runs : unsigned int, optional
+    num_runs : positive int, optional
         The number of times to calculate each curve.
 
     Returns
     -------
     power : array
-        The power calculated for each subsample at each count.
+        The power calculated for each subsample at each count. The array has
+        `num_runs` rows, a length with the same number of elements as
+        `sample_counts` and a depth equal to the number of p values returned by
+        `test`. If `test` returns a float, the returned array will be
+        two-dimensional instead of three.
     sample_counts : array
         The number of samples drawn at each power calculation.
 
@@ -209,38 +220,39 @@ def subsample_power(test, samples, draw_mode='ind', alpha_pwr=0.05,
     ValueError
         If the `counts_interval` is greater than the difference between the
         sample start and the max value, the function raises a ValueError.
+    ValueError
+        There are not an equal number of groups in `samples` and in `ratios`.
+    TypeError
+        `test` does not return a float or a 1-dimensional numpy array.
 
 
     Examples
     --------
     Let's say we wanted to look at the relationship between the presence of a
-    specific bacteria and the probability of a pre or post menopausal woman
-    experiencing a health outcome. Healthy women were enrolled in the study
-    either before or after menopause, and followed for five years. They
-    submitted fecal samples at regular intervals during that period, and were
-    assessed for a particular irreversible health outcome over that period.
-
-    16S sequencing and available literature suggest a set of candidate taxa
-    may be associated with the health outcome. Assume there are 100 samples
-    (50 premenopausal samples and 50 postmenopausal samples) where the taxa
-    of interest was identified by 16S sequencing and the taxonomic abundance
-    was confirmed in a certain fraction of samples at a minimum level.
-
-    We can simulate the probability that a woman positive for this taxa
-    experiences the health outcome using a binomial distribution.
+    specific bacteria, *Gardnerella vaginalis* in the vaginal community, and
+    the probability of a pre or post menopausal woman experiencing a urinary
+    tract infection (UTI). Healthy women were enrolled in the study either
+    before or after menopause, and followed for eight weeks. Participants
+    submitted fecal samples at the beginning of the study, and were then
+    followed for clinical symptoms of a UTI. A confirmed UTI was an endpoint
+    in the study.
+
+    Using available literature and 16S sequencing, a set of candidate taxa were
+    identified as correlated with UTIs, including *G. vaginalis*. In the 100
+    women (50 premenopausal and 50 postmenopausal samples) who had UTIs, the
+    presence or absence of *G. vaginalis* was confirmed with quantitative PCR.
+
+    We can model the probability that detectable *G. vaginalis* was found in
+    these samples using a binomial model. (*Note that this is a simulation.*)
 
     >>> import numpy as np
     >>> np.random.seed(25)
-    >>> pre_rate = np.random.binomial(1, 0.75, size=(50,))
-    >>> pre_rate
-    array([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1,
-           0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0,
-           1, 1, 1, 1])
-    >>> pos_rate = np.random.binomial(1, 0.25, size=(50,))
-    >>> pos_rate
-    array([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0,
-           0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0,
-           0, 1, 0, 0])
+    >>> pre_rate = np.random.binomial(1, 0.85, size=(50,))
+    >>> pre_rate.sum()
+    45
+    >>> pos_rate = np.random.binomial(1, 0.40, size=(50,))
+    >>> pos_rate.sum()
+    21
 
     Let's set up a test function, so we can test the probability of
     finding a difference in frequency between the two groups. We'll use
@@ -253,82 +265,125 @@ def subsample_power(test, samples, draw_mode='ind', alpha_pwr=0.05,
 
     Let's make sure that our two distributions are different.
 
-    >>> round(test([pre_rate, pos_rate]), 5)
-    9e-05
+    >>> round(test([pre_rate, pos_rate]), 3)
+    0.003
 
     Since there are an even number of samples, and we don't have enough
     information to try controlling the data, we'll use
     `skbio.stats.power.subsample_power` to compare the two groups. If we had
-    metadata about other risk factors, like a family history, BMI, tobacco use,
-    we might want to use `skbio.stats.power.subsample_paired_power`.
+    metadata about other risk factors, like a reproductive history, BMI,
+    tobacco use, we might want to use
+    `skbio.stats.power.subsample_paired_power`.
     We'll also use "ind" `draw_mode`, since there is no linkage between the
     two groups of samples.
 
     >>> from skbio.stats.power import subsample_power
     >>> pwr_est, counts = subsample_power(test=test,
     ...                                   samples=[pre_rate, pos_rate],
+    ...                                   num_iter=100,
+    ...                                   num_runs=5,
     ...                                   counts_interval=5)
     >>> counts
     array([ 5, 10, 15, 20, 25, 30, 35, 40, 45])
-    >>> nanmean(pwr_est, 0)
-    array([ 0.1776,  0.3392,  0.658 ,  0.8856,  0.9804,  0.9982,  1.    ,
-            1.    ,  1.    ])
+    >>> nanmean(pwr_est, 0) # doctest: +NORMALIZE_WHITESPACE
+    array([ 0.056,  0.074,  0.226,  0.46 ,  0.61 ,  0.806,  0.952,  1.   ,
+            1.   ])
+    >>> counts[nanmean(pwr_est, 0) > 0.8].min()
+    30
 
-    So, we can estimate that we will see a significant difference between
-    the two groups (:math:`\alpha \leq 0.05`) at least 80% of the time if we
-    use 20 observations per group.
+    So, we can estimate that we will see a significant difference in the
+    presence of *G. vaginalis* in the stool of pre and post women with UTIs if
+    we have at least 30 samples per group.
 
     If we wanted to test the relationship of a second candidate taxa which is
     more rare in the population, but may have a similar effect, based on
-    available literature, we might also start by trying to identify 20
+    available literature, we might also start by trying to identify 30
     samples per group where the second candidate taxa is present.
 
-    """
-
-    # Determines the minimum number of ids in a category
-    num_ids = np.array([len(id_) for id_ in samples]).min()
+    Suppose, now, that we want to test that a secondary metabolite seen only in
+    the presence of *G vaginalis* to see if it is also correlated with UTIs. We
+    can model the abundance of the metabolite as a normal distribution.
 
-    # Checks that "matched" mode is handled appropriately
-    if draw_mode == "matched":
-        for id_ in samples:
-            if not len(id_) == num_ids:
-                raise ValueError('Each vector in samples must be the same '
-                                 'length in "matched" draw_mode.')
+    >>> met_pos = (np.random.randn(pre_rate.sum() + pos_rate.sum()) * 2000 +
+    ...     2500)
+    >>> met_pos[met_pos < 0] = 0
+    >>> met_neg = (np.random.randn(100 - (pre_rate.sum() +
+    ...     pos_rate.sum())) * 2000 + 500)
+    >>> met_neg[met_neg < 0] = 0
 
-    # Checks there are enough samples to subsample
-    if num_ids <= min_observations:
-        raise ValueError('There are not enough samples for subsampling.')
+    Let's compare the populations with a kruskal-wallis test. Physically, there
+    cannot be a negative concentration of a chemical, so we've set the lower
+    bound at 0. This means that we can no longer assume our distribution is
+    normal.
 
-    # Calculates the effect size vector
-    if min_counts is None:
-        min_counts = counts_interval
+    >>> from scipy.stats import kruskal
+    >>> def metabolite_test(x):
+    ...     return kruskal(x[0], x[1])[1]
+    >>> round(metabolite_test([met_pos, met_neg]), 3)
+    0.005
+
+    When we go to perform the statistical test on all the data, you might
+    notice that there are twice as many samples from women with *G. vaginalis*
+    than those without. It might make sense to account for this difference when
+    we're testing power. So, we're going to set the `ratio` parameter, which
+    lets us draw twice as many samples from women with *G. vaginalis*.
+
+    >>> pwr_est2, counts2 = subsample_power(test=metabolite_test,
+    ...                                     samples=[met_pos, met_neg],
+    ...                                     counts_interval=5,
+    ...                                     num_iter=100,
+    ...                                     num_runs=5,
+    ...                                     ratio=[2, 1])
+    >>> counts2
+    array([  5.,  10.,  15.,  20.,  25.,  30.])
+    >>> nanmean(pwr_est2, 0)
+    array([ 0.14 ,  0.272,  0.426,  0.646,  0.824,  0.996])
+    >>> counts2[nanmean(pwr_est2, 0) > 0.8].min()
+    25.0
+
+    When we consider the number of samples per group needed in the power
+    analysis, we need to look at the ratio. The analysis says that we need 25
+    samples in the smallest group, in this case, the group of women without
+    *G. vaginalis* and 50 samples from women with *G. vaginalis* to see a
+    significant difference in the abundance of our secondary metabolite at 80%
+    power.
 
-    if (max_counts - min_counts) < counts_interval:
-        raise ValueError("No subsamples of the specified size can be drawn.")
+    """
 
-    sample_counts = np.arange(min_counts,
-                              min(max_counts, num_ids),
-                              counts_interval)
+    # Checks the inputs
+    ratio, num_p, sample_counts = \
+        _check_subsample_power_inputs(test=test,
+                                      samples=samples,
+                                      draw_mode=draw_mode,
+                                      ratio=ratio,
+                                      min_counts=min_counts,
+                                      max_counts=max_counts,
+                                      counts_interval=counts_interval)
 
     # Prealocates the power array
-    power = np.zeros((num_runs, len(sample_counts)))
+    power = np.zeros((num_runs, len(sample_counts), num_p))
 
     # Calculates the power instances
-    for id1 in range(num_runs):
-        power[id1, :] = _calculate_power_curve(test,
-                                               samples,
-                                               sample_counts,
-                                               num_iter=num_iter,
-                                               alpha=alpha_pwr,
-                                               mode=draw_mode)
+    for id2, c in enumerate(sample_counts):
+        count = np.round(c * ratio, 0).astype(int)
+        for id1 in range(num_runs):
+            ps = _compare_distributions(test=test,
+                                        samples=samples,
+                                        num_p=num_p,
+                                        counts=count,
+                                        num_iter=num_iter,
+                                        mode=draw_mode)
+            power[id1, id2, :] = _calculate_power(ps, alpha_pwr)
+
+    power = power.squeeze()
 
     return power, sample_counts
 
 
+@experimental(as_of="0.4.0")
 def subsample_paired_power(test, meta, cat, control_cats, order=None,
                            strict_match=True, alpha_pwr=0.05,
-                           min_observations=20, max_counts=50,
-                           counts_interval=10, min_counts=None,
+                           max_counts=50, counts_interval=10, min_counts=None,
                            num_iter=500, num_runs=10):
     r"""Estimates power iteratively using samples with matching metadata
 
@@ -360,48 +415,50 @@ def subsample_paired_power(test, meta, cat, control_cats, order=None,
         `control_cats` can be considered matches.
     alpha_pwr : float, optional
         The critical value used to calculate the power.
-    min_observations : unsigned int, optional
-        The minimum number of paired samples which must exist
-        for a category and set of control categories to be able to subsample
-        and make power calculations. This is not the same as the minimum
-        number of observations to draw during subsampling.
-    max_counts : unsigned int, optional
+    max_counts : positive int, optional
         The maximum number of observations per sample to draw
         for effect size calculation.
-    counts_interval : unsigned int, optional
+    counts_interval : positive int, optional
         The difference between each subsampling count.
-    min_counts : unsigned int, optional
+    min_counts : positive int, optional
         How many samples should be drawn for the smallest
         subsample. If this is None, the `counts_interval` will be used.
-    num_iter : unsigned int, optional
+    num_iter : positive int, optional
         The number of p-values to generate for each point on the curve.
-    num_runs : unsigned int, optional
+    num_runs : positive int, optional
         The number of times to calculate each curve.
 
     Returns
     -------
     power : array
-        The power calculated for each subsample at each count.
+        The power calculated for each subsample at each count. The array is
+        `num_runs` rows, a length with the same number of elements as
+        `sample_counts` and a depth equal to the number of p values returned by
+        `test`. If `test` returns a float, the returned array will be
+        two-dimensional instead of three.
     sample_counts : array
         The number of samples drawn at each power calculation.
 
     Raises
     ------
     ValueError
-        There is a value error if there are fewer samples than the minimum
+        There is a ValueError if there are fewer samples than the minimum
         count.
     ValueError
         If the `counts_interval` is greater than the difference between the
         sample start and the max value, the function raises a ValueError.
+    TypeError
+        `test` does not return a float or a 1-dimensional numpy array.
+
 
     Examples
     --------
     Assume you are interested in the role of a specific cytokine of protein
-    translocation in myloid-lineage cells. You are able to culture two
+    translocation in myeloid-lineage cells. You are able to culture two
     macrophage lineages (bone marrow derived phagocytes and
     peritoneally-derived macrophages). Due to unfortunate circumstances, your
     growth media must be acquired from multiple sources (lab, company A,
-    company B). Also unfortunate, you must use labor-intense low throughput
+    company B). Also unfortunate, you must use labor-intensive low throughput
     assays. You have some preliminary measurements, and you'd like to
     predict how many (more) cells you need to analyze for 80% power.
 
@@ -428,7 +485,7 @@ def subsample_paired_power(test, meta, cat, control_cats, order=None,
     >>> from scipy.stats import kruskal
     >>> f = lambda x: kruskal(*[data.loc[i, 'OUTCOME'] for i in x])[1]
 
-    Let's check that cytokine treatment has a signifigant effect across all
+    Let's check that cytokine treatment has a significant effect across all
     the cells.
 
     >>> treatment_stat = [g for g in data.groupby('TREATMENT').groups.values()]
@@ -455,69 +512,61 @@ def subsample_paired_power(test, meta, cat, control_cats, order=None,
     ...                                   meta=data,
     ...                                   cat='TREATMENT',
     ...                                   control_cats=control_cats,
-    ...                                   min_observations=5,
     ...                                   counts_interval=5,
     ...                                   num_iter=100,
     ...                                   num_runs=5)
     >>> cnt
-    array([ 5, 10, 15, 20])
+    array([  5.,  10.,  15.,  20.])
     >>> pwr.mean(0)
-    array([ 0.15 ,  0.376,  0.614,  0.836])
+    array([ 0.196,  0.356,  0.642,  0.87 ])
     >>> pwr.std(0).round(3)
-    array([ 0.046,  0.106,  0.176,  0.153])
+    array([ 0.019,  0.021,  0.044,  0.026])
 
     Estimating off the power curve, it looks like 20 cells per group may
-    provide addiquite power for this experiment, although the large variance
+    provide adequate power for this experiment, although the large variance
     in power might suggest extending the curves or increasing the number of
     samples per group.
 
     """
 
-    # Checks for the number of sampling pairs avaliable
-    sub_ids = paired_subsamples(meta, cat, control_cats, order, strict_match)
-
-    # Determines the minimum number of ids avaliable
-    num_ids = len(sub_ids[0])
-
-    # Checks there are enough samples to subsample
-    if num_ids <= min_observations:
-        raise ValueError('There are not enough samples for subsampling.')
+    # Handles the order argument
+    if order is None:
+        order = sorted(meta.groupby(cat).groups.keys())
+    order = np.array(order)
 
-    # Calculates the effect size vector
-    if min_counts is None:
-        min_counts = counts_interval
+    # Checks for the number of sampling pairs available
+    meta_pairs, index = _identify_sample_groups(meta, cat, control_cats, order,
+                                                strict_match)
+    min_obs = min([_get_min_size(meta, cat, control_cats, order, strict_match),
+                  np.floor(len(index)*0.9)])
+    sub_ids = _draw_paired_samples(meta_pairs, index, min_obs)
+
+    ratio, num_p, sample_counts = \
+        _check_subsample_power_inputs(test=test,
+                                      samples=sub_ids,
+                                      draw_mode='matched',
+                                      min_counts=min_counts,
+                                      max_counts=max_counts,
+                                      counts_interval=counts_interval)
 
-    if (max_counts - min_counts) < counts_interval:
-        raise ValueError("No subsamples of the specified size can be drawn.")
+    # Preallocates the power array
+    power = np.zeros((num_runs, len(sample_counts), num_p))
 
-    sample_counts = np.arange(min_counts,
-                              min(max_counts, num_ids),
-                              counts_interval)
+    # Calculates power instances
+    for id2, c in enumerate(sample_counts):
+        for id1 in range(num_runs):
+            ps = np.zeros((num_p, num_iter))
+            for id3 in range(num_iter):
+                subs = _draw_paired_samples(meta_pairs, index, c)
+                ps[:, id3] = test(subs)
+            power[id1, id2, :] = _calculate_power(ps, alpha_pwr)
 
-    # Prealocates the power array
-    power = np.zeros((num_runs, len(sample_counts)))
-
-    power[0, :] = _calculate_power_curve(test,
-                                         sub_ids,
-                                         sample_counts,
-                                         mode="matched",
-                                         num_iter=num_iter,
-                                         alpha=alpha_pwr)
-
-    for id1 in np.arange(1, num_runs):
-        sub_ids = paired_subsamples(meta, cat, control_cats, order,
-                                    strict_match)
-        # Calculates the power curve
-        power[id1, :] = _calculate_power_curve(test,
-                                               sub_ids,
-                                               sample_counts,
-                                               num_iter=num_iter,
-                                               alpha=alpha_pwr,
-                                               mode="matched")
+    power = power.squeeze()
 
     return power, sample_counts
 
 
+ at experimental(as_of="0.4.0")
 def confidence_bound(vec, alpha=0.05, df=None, axis=None):
     r"""Calculates a confidence bound assuming a normal distribution
 
@@ -531,7 +580,7 @@ def confidence_bound(vec, alpha=0.05, df=None, axis=None):
         The degrees of freedom associated with the
         distribution. If None is given, df is assumed to be the number of
         elements in specified axis.
-    axis : unsigned int, optional
+    axis : positive int, optional
         The axis over which to take the deviation. When axis
         is None, a single value will be calculated for the whole matrix.
 
@@ -564,7 +613,14 @@ def confidence_bound(vec, alpha=0.05, df=None, axis=None):
 
     return bound
 
+bootstrap_power_curve_deprecation_reason = (
+    "Please use skbio.stats.power.subsample_power or "
+    "skbio.stats.power.subsample_paired_power followed by "
+    "confidence_bound.")
 
+
+ at deprecated(as_of="0.2.3-dev", until="0.4.1",
+            reason=bootstrap_power_curve_deprecation_reason)
 def bootstrap_power_curve(test, samples, sample_counts, ratio=None,
                           alpha=0.05, mode='ind', num_iter=500, num_runs=10):
     r"""Repeatedly calculates the power curve for a specified alpha level
@@ -593,9 +649,9 @@ def bootstrap_power_curve(test, samples, sample_counts, ratio=None,
         this may be useful when working with regression data where
         :math:`x_{1}, x_{2}, ..., x_{n}` maps to :math:`y_{1}, y_{2}, ... ,
         y_{n}`.
-    num_iter : unsigned int, optional
+    num_iter : positive int, optional
         The number of p-values to generate for each point on the curve.
-    num_runs : unsigned int, optional
+    num_runs : positive int, optional
         The number of times to calculate each curve.
 
     Returns
@@ -607,8 +663,8 @@ def bootstrap_power_curve(test, samples, sample_counts, ratio=None,
 
     Examples
     --------
-    Suppose we have 100 samples randomly drawn from two normal distribitions,
-    the first with mean 0 and standard devation 1, and the second with mean 3
+    Suppose we have 100 samples randomly drawn from two normal distributions,
+    the first with mean 0 and standard deviation 1, and the second with mean 3
     and standard deviation 1.5
 
     >>> import numpy as np
@@ -616,7 +672,7 @@ def bootstrap_power_curve(test, samples, sample_counts, ratio=None,
     >>> samples_1 = np.random.randn(100)
     >>> samples_2 = 1.5 * np.random.randn(100) + 1
 
-    We want to test the statistical power of a independent two sample t-test
+    We want to test the statistical power of an independent two sample t-test
     comparing the two populations. We can define an anonymous function, `f`,
     to wrap the scipy function for independent t tests,
     `scipy.stats.ttest_ind`. The test function will take a list of value
@@ -654,22 +710,21 @@ def bootstrap_power_curve(test, samples, sample_counts, ratio=None,
                                    num_iter=num_iter,
                                    alpha=alpha,
                                    mode=mode)
-
-    # Calculates two summary statitics
+    # Calculates two summary statistics
     power_mean = power.mean(0)
     power_bound = confidence_bound(power, alpha=alpha[0], axis=0)
 
-    # Calculates summary statitics
+    # Calculates summary statistics
     return power_mean, power_bound
 
 
+ at experimental(as_of="0.4.0")
 def paired_subsamples(meta, cat, control_cats, order=None, strict_match=True):
-    r"""Gets a set of samples to serve as controls
+    r"""Draws a list of samples varied by `cat` and matched for `control_cats`
 
     This function is designed to provide controlled samples, based on a
     metadata category. For example, one could control for age, sex, education
-    level, and diet type while measuring exercise frequency. No outcome
-    value is considered in this subsampling process.
+    level, and diet type while measuring exercise frequency.
 
     Parameters
     ----------
@@ -687,12 +742,13 @@ def paired_subsamples(meta, cat, control_cats, order=None, strict_match=True):
         groups 'A', 'B' and 'C', and you only want to look at A vs B, `order`
         would be set to ['A', 'B'].
     strict_match: bool, optional
-        This determines how data is grouped using
-        `control_cats`. If a sample within `meta` has an undefined value (NaN)
-        for any of the columns in `control_cats`, the sample will not be
-        considered as having a match and will be ignored when `strict_match`
-        is True. If `strict_match` is False, missing values (NaN) in the
-        `control_cats` can be considered matches.
+        This determines how data is grouped using `control_cats`. If a sample
+        within `meta` has an undefined value (`NaN`) for any of the columns in
+        `control_cats`, the sample will not be considered as having a match and
+        will be ignored when `strict_match` is True. If `strict_match` is
+        False, missing values (NaN) in the `control_cats` can be considered
+        matches.
+
 
     Returns
     -------
@@ -727,83 +783,65 @@ def paired_subsamples(meta, cat, control_cats, order=None, strict_match=True):
     >>> from skbio.stats.power import paired_subsamples
     >>> ids = paired_subsamples(meta, 'HOUSING', ['SEX', 'AGE', 'ABX'])
     >>> np.hstack(ids) #doctest: +ELLIPSIS
-    array(['BB', 'TS', 'CB']...
+    array(['BB', 'TS', 'CB']...)
 
     So, for this set of data, we can match TS, CB, and BB based on their age,
-    sex, and antibiotic use. SW cannot be matched in either group becuase
+    sex, and antibiotic use. SW cannot be matched in either group because
     `strict_match` was true, and there is missing AGE data for this sample.
 
     """
 
-    # Sets the index data
-    # Groups meta by category
-    cat_groups = meta.groupby(cat).groups
-
     # Handles the order argument
     if order is None:
-        order = sorted(cat_groups.keys())
+        order = sorted(meta.groupby(cat).groups.keys())
     order = np.array(order)
-    num_groups = len(order)
-
-    # Determines the number of samples, and the experimental and control group
-    group_size = np.array([len(cat_groups[o]) for o in order])
-    ctrl_name = order[group_size == group_size.min()][0]
-    order = order[order != ctrl_name]
-
-    # Gets a control group table
-    ctrl_match_groups = meta.groupby(control_cats).groups
-    ctrl_group = meta.loc[cat_groups[ctrl_name]
-                          ].groupby(list(control_cats)).groups
-
-    ids = [np.array([])] * num_groups
-    # Loops through samples in the experimental group to match for controls
-    for check_group, ctrl_ids in viewitems(ctrl_group):
-        # Checks the categories have been defined
-        undefed_check = np.array([_check_strs(p) for p in check_group])
-        if not undefed_check.all() and strict_match:
-            continue
-        # Removes the matched ids from order
-        matched_ids = ctrl_match_groups[check_group]
-        for id_ in ctrl_ids:
-            matched_ids.remove(id_)
-        pos_ids = []
-        num_ids = [len(ctrl_ids)]
-        # Gets the matrix of the matched ids and groups them
-        exp_group = meta.loc[matched_ids].groupby(cat).groups
-        for grp in order:
-            # Checks group to be considered is included in the grouping
-            if grp not in exp_group:
-                break
-            # Gets the id associated with the group
-            pos_ids.append(exp_group[grp])
-            num_ids.append(len(exp_group[grp]))
-        # Determines the minimum number of samples
-        num_draw = np.array(num_ids).min()
-        # Draws samples from possible ids
-        exp_ids = [np.random.choice(ctrl_ids, num_draw, replace=False)]
-        exp_ids.extend([np.random.choice(id_, num_draw, replace=False)
-                        for id_ in pos_ids])
-
-        if len(exp_ids) == num_groups:
-            for idx in range(num_groups):
-                ids[idx] = np.hstack((ids[idx], exp_ids[idx]))
+
+    # Checks the groups in the category
+    min_obs = _get_min_size(meta, cat, control_cats, order, strict_match)
+
+    # Identifies all possible subsamples
+    meta_pairs, index = _identify_sample_groups(meta=meta,
+                                                cat=cat,
+                                                control_cats=control_cats,
+                                                order=order,
+                                                strict_match=strict_match)
+
+    # Draws paired ids
+    ids = _draw_paired_samples(meta_pairs=meta_pairs,
+                               index=index,
+                               num_samps=min_obs)
 
     return ids
 
 
-def _check_strs(x):
-    r"""Returns False if x is a nan and True is x is a string or number"""
+def _get_min_size(meta, cat, control_cats, order, strict_match):
+    """Determines the smallest group represented"""
+
+    if strict_match:
+        all_cats = copy.deepcopy(control_cats)
+        all_cats.append(cat)
+        meta = meta[all_cats].dropna()
+
+    return meta.groupby(cat).count().loc[order, control_cats[0]].min()
 
-    if isinstance(x, str):
+
+def _check_nans(x, switch=False):
+    r"""Returns False if x is a nan and True if x is a string or number
+    """
+    if isinstance(x, six.string_types):
         return True
     elif isinstance(x, (float, int)):
         return not np.isnan(x)
+    elif switch and isinstance(x, (list, tuple)) and np.nan in x:
+        return False
+    elif switch and isinstance(x, (list, tuple)):
+        return True
     else:
         raise TypeError('input must be a string, float or a nan')
 
 
 def _calculate_power(p_values, alpha=0.05):
-    r"""Calculates statical power empirically
+    r"""Calculates statistical power empirically
 
     Parameters
     ----------
@@ -816,48 +854,56 @@ def _calculate_power(p_values, alpha=0.05):
     Returns
     -------
     power : float
-        The emperical power, or the fraction of observed p values below the
+        The empirical power, or the fraction of observed p values below the
         critical value.
 
     """
 
-    w = (p_values < float(alpha)).sum()/float(p_values.shape[0])
+    p_values = np.atleast_2d(p_values)
+
+    w = (p_values < alpha).sum(axis=1)/p_values.shape[1]
 
     return w
 
 
-def _compare_distributions(test, samples, counts=5, mode="ind", num_iter=1000):
+def _compare_distributions(test, samples, num_p, counts=5, mode="ind",
+                           num_iter=100):
     r"""Compares two distribution arrays iteratively
 
     Parameters
     ----------
     test : function
         The statistical test which accepts an array_like of sample ids
-        (list of lists) and returns a p-value.
+        (list of lists) and returns a p-value, which may be a float or a
+        one-dimensional array of p-values.
     samples : list of arrays
         A list where each 1-d array represents a sample. If `mode` is
         "matched", there must be an equal number of observations in each
         sample.
-    counts : unsigned int or 1-D array, optional
+    num_p : positive int
+        The number of p-values returned by the test.
+    counts : positive int or 1-D array, optional
         The number of samples to draw from each distribution.
         If this is a 1-D array, the length must correspond to the number of
         samples. The function will not draw more observations than are in a
         sample. In "matched" `mode`, the same number of observations will be
         drawn from each group.
-    mode : {"ind", "matched"}, optional
+    mode : {"ind", "matched", "paired"}, optional
         "matched" samples should be used when observations in
         samples have corresponding observations in other groups. For instance,
         this may be useful when working with regression data where
         :math:`x_{1}, x_{2}, ..., x_{n}` maps to :math:`y_{1}, y_{2}, ... ,
         y_{n}`.
-    num_iter : int, optional
+    num_iter : positive int, optional
         Default 1000. The number of p-values to generate for each point on the
         curve.
 
     Returns
     -------
     p_values : array
-        The p-values for `n_iter` subsampled tests.
+        The p-values for the subsampled tests. If `test` returned a single
+        p-value, `p_values` is a one-dimensional array. If `test` returned an
+        array, `p_values` has dimensions `num_p` x `num_iter`.
 
     Raises
     ------
@@ -870,35 +916,16 @@ def _compare_distributions(test, samples, counts=5, mode="ind", num_iter=1000):
 
     """
 
-    # Determines the number of groups
-    num_groups = len(samples)
+    # Preallocates the p-value matrix
+    p_values = np.zeros((num_p, num_iter))
 
-    # Checks the mode
-    if mode not in {'ind', 'matched'}:
-        raise ValueError('Supported sample modes are "ind" and "matched".')
+    # Determines the number of samples per group
+    num_groups = len(samples)
+    samp_lens = [len(sample) for sample in samples]
 
-    # Handles the number of samples for later instances
     if isinstance(counts, int):
         counts = np.array([counts] * num_groups)
 
-    if not len(counts) == num_groups:
-        raise ValueError('If counts is a 1-D array, there must be a count to'
-                         ' draw for each group.')
-
-    # Checks the group length
-    samp_lens = [len(sample) for sample in samples]
-    # Checks the group length
-    if mode == 'matched' and np.array([samp_lens[i] != samp_lens[i+1] for i in
-                                       range(num_groups-1)]).all():
-        raise ValueError('In "matched" mode, each sample must have the same'
-                         ' number of observations.')
-    if np.array([samp_lens[i] < counts[i] for i in range(num_groups)]).any():
-        raise ValueError('You cannot choose more observations that exist '
-                         'in a sample.')
-
-    # Prealocates the pvalue matrix
-    p_values = np.zeros((num_iter))
-
     for idx in range(num_iter):
         if mode == "matched":
             pos = np.random.choice(np.arange(0, samp_lens[0]), counts[0],
@@ -908,11 +935,256 @@ def _compare_distributions(test, samples, counts=5, mode="ind", num_iter=1000):
             subs = [np.random.choice(np.array(pop), counts[i], replace=False)
                     for i, pop in enumerate(samples)]
 
-        p_values[idx] = test(subs)
+        p_values[:, idx] = test(subs)
+
+    if num_p == 1:
+        p_values = p_values.squeeze()
 
     return p_values
 
 
+def _check_subsample_power_inputs(test, samples, draw_mode='ind', ratio=None,
+                                  max_counts=50, counts_interval=10,
+                                  min_counts=None):
+    r"""Makes sure that everything is sane before power calculations
+
+    Parameters
+    ----------
+    test : function
+        The statistical test which accepts a list of arrays of values
+        (sample ids or numeric values) and returns a p value or one-dimensional
+        array of p values.
+    samples : array_like
+        `samples` can be a list of lists or a list of arrays where each
+        sublist or row in the array corresponds to a sampled group.
+    draw_mode : {"ind", "matched"}, optional
+        "matched" samples should be used when observations in
+        samples have corresponding observations in other groups. For instance,
+        this may be useful when working with regression data where
+        :math:`x_{1}, x_{2}, ..., x_{n}` maps to
+        :math:`y_{1}, y_{2}, ..., y_{n}`. Sample vectors must be the same
+        length in "matched" mode.
+        If there is no reciprocal relationship between samples, then
+        "ind" mode should be used.
+    ratio : 1-D array, optional
+        The fraction of the sample counts which should be
+        assigned to each group. If this is a 1-D array, it must be the same
+        length as `samples`. If no value is supplied (`ratio` is None),
+        then an equal number of observations will be drawn for each sample. In
+        `matched` mode, this will be set to one.
+    max_counts : positive int, optional
+        The maximum number of samples per group to draw for effect size
+        calculation.
+    counts_interval : positive int, optional
+        The difference between each subsampling count.
+    min_counts : positive int, optional
+        How many samples should be drawn for the smallest
+        subsample. If this is None, the `counts_interval` will be used.
+
+    Returns
+    -------
+    ratio : 1-D array
+        The fraction of the sample counts which should be assigned to each
+        group.
+    num_p : positive integer
+        The number of p values returned by `test`.
+    sample_counts : array
+        The number of samples drawn at each power calculation.
+
+    Raises
+    ------
+    ValueError
+        If the `mode` is "matched", an error will occur if the arrays in
+        `samples` are not the same length.
+    ValueError
+        There is a ValueError if there are fewer samples than the minimum
+        count.
+    ValueError
+        If the `counts_interval` is greater than the difference between the
+        sample start and the max value, the function raises a ValueError.
+    ValueError
+        There are not an equal number of groups in `samples` and in `ratios`.
+    TypeError
+        `test` does not return a float or a 1-dimensional numpy array.
+
+    """
+
+    if draw_mode not in {'ind', 'matched'}:
+        raise ValueError('mode must be "matched" or "ind".')
+
+    # Determines the minimum number of ids in a category
+    id_counts = np.array([len(id_) for id_ in samples])
+    num_ids = id_counts.min()
+    # Determines the number of groups
+    num_groups = len(samples)
+
+    # Checks that "matched" mode is handled appropriately
+    if draw_mode == "matched":
+        for id_ in samples:
+            if not len(id_) == num_ids:
+                raise ValueError('Each vector in samples must be the same '
+                                 'length in "matched" draw_mode.')
+
+    # Checks the number of counts is appropriate
+    if min_counts is None:
+        min_counts = counts_interval
+    if (max_counts - min_counts) < counts_interval:
+        raise ValueError("No subsamples of the specified size can be drawn.")
+
+    # Checks the ratio argument is sane
+    if ratio is None or draw_mode == 'matched':
+        ratio = np.ones((num_groups))
+    else:
+        ratio = np.asarray(ratio)
+    if not ratio.shape == (num_groups,):
+        raise ValueError('There must be a ratio for each group.')
+
+    ratio_counts = np.array([id_counts[i] / ratio[i]
+                             for i in range(num_groups)])
+    largest = ratio_counts.min()
+
+    # Determines the number of p values returned by the test
+    p_return = test(samples)
+    if isinstance(p_return, float):
+        num_p = 1
+    elif isinstance(p_return, np.ndarray) and len(p_return.shape) == 1:
+        num_p = p_return.shape[0]
+    else:
+        raise TypeError('test must return a float or one-dimensional array.')
+
+    # Calculates the same counts
+    sample_counts = np.arange(min_counts,
+                              min(max_counts, largest),
+                              counts_interval)
+
+    return ratio, num_p, sample_counts
+
+
+def _identify_sample_groups(meta, cat, control_cats, order, strict_match):
+    """Aggregates samples matches for `control_cats` that vary by `cat`
+
+    Parameters
+    ----------
+    meta : pandas.DataFrame
+        The metadata associated with the samples.
+    cat : str, list
+        The metadata category (or a list of categories) for comparison.
+    control_cats : list
+        The metadata categories to be used as controls. For example, if you
+        wanted to vary age (`cat` = "AGE"), you might want to control for
+        gender and health status (i.e. `control_cats` = ["SEX", "HEALTHY"])
+    order : list
+        The order of groups in the category. This can be used
+        to limit the groups selected. For example, if there's a category with
+        groups 'A', 'B' and 'C', and you only want to look at A vs B, `order`
+        would be set to ['A', 'B'].
+    strict_match: bool, optional
+        This determines how data is grouped using `control_cats`. If a sample
+        within `meta` has an undefined value (`NaN`) for any of the columns in
+        `control_cats`, the sample will not be considered as having a match and
+        will be ignored when `strict_match` is True. If `strict_match` is
+        False, missing values (NaN) in the `control_cats` can be considered
+        matches.
+
+    Returns
+    -------
+    meta_pairs : dict
+        Describes the categories matched for metadata. The
+        `control_cat`-grouped samples are numbered, corresponding to the
+        second list in `index`. The group is keyed to the list of sample arrays
+        with the same length of `order`.
+    index : 1-D array
+        An array of integer keys into `meta_pairs`, with one entry for each
+        paired observation that can be drawn from the corresponding
+        `control_cat`-group.
+
+    """
+
+    # Sets up variables to be filled
+    meta_pairs = {}
+    index = []
+    i1 = 0
+
+    # Groups the data by the control groups
+    ctrl_groups = meta.groupby(control_cats).groups
+    # Identifies the samples that satisfy the control pairs
+    for (g, ids) in viewitems(ctrl_groups):
+        # If strict_match, Skips over data that has nans
+        if not _check_nans(g, switch=True) and strict_match:
+            continue
+        # Draws the samples that are matched for control cats
+        m_ids = meta.loc[ids].groupby(cat).groups
+        # Checks if samples from the cat groups are represented in those
+        # Samples
+        ids_vec = id_vecs = [m_ids[o] for o in order if o in
+                             m_ids]
+        # If all groups are represented, the index and results are retained
+        if len(ids_vec) == len(order):
+            min_vec = np.array([len(v) for v in id_vecs])
+            loc_vec = np.arange(0, min_vec.min())
+            meta_pairs[i1] = id_vecs
+            index.append(np.zeros(loc_vec.shape) + i1)
+            i1 = i1 + 1
+        # If the groups are not represented, an empty array gets passed
+        else:
+            index.append(np.array([]))
+
+    # Converts index to a 1d array
+    index = np.hstack(index)
+
+    # If index is empty, sets up meta_pairs with a 'no' key.
+    if not meta_pairs:
+        meta_pairs['no'] = order
+
+    return meta_pairs, index
+
+
+def _draw_paired_samples(meta_pairs, index, num_samps):
+    """Draws a random set of ids from a matched list
+
+    Parameters
+    ----------
+    meta_pairs : dict
+        Describes the categories matched for metadata. The
+        `control_cat`-grouped samples are numbered, corresponding to the
+        second list in `index`. The group is keyed to the list of sample arrays
+        with the same length of `order`.
+    index : 1-D array
+        An array of integer keys into `meta_pairs`, with one entry for each
+        paired observation that can be drawn from the corresponding
+        `control_cat`-group.
+
+    Returns
+    -------
+    ids : list
+        A list of randomly selected ids drawn from each group.
+    """
+
+    # Handles an empty paired vector
+    if 'no' in meta_pairs:
+        return [np.array([]) for o in meta_pairs['no']]
+
+    # Identifies the absolute positions of the control group being drawn
+    set_pos = np.random.choice(index, int(num_samps),
+                               replace=False).astype(int)
+
+    subs = []
+
+    # Draws the other groups
+    for set_, num_ in viewitems(collections.Counter(set_pos)):
+        r2 = [np.random.choice(col, num_, replace=False) for col in
+              meta_pairs[set_]]
+        subs.append(r2)
+
+    ids = [np.hstack(ids) for ids in zip(*subs)]
+
+    return ids
+
+
 def _calculate_power_curve(test, samples, sample_counts, ratio=None,
                            mode='ind', num_iter=1000, alpha=0.05):
     r"""Generates an empirical power curve for the samples.
@@ -920,7 +1192,7 @@ def _calculate_power_curve(test, samples, sample_counts, ratio=None,
     Parameters
     ----------
     test : function
-        The statistical test which accepts an list of arrays of values and
+        The statistical test which accepts a list of arrays of values and
         returns a p value.
     samples : array_like
         `samples` can be a list of lists or an array where each sublist or row
@@ -947,17 +1219,13 @@ def _calculate_power_curve(test, samples, sample_counts, ratio=None,
     -------
     p_values : array
         The p-values associated with the input sample counts.
-
     Raises
     ------
     ValueError
         If ratio is an array and ratio is not the same length as samples
-
     """
-
     # Casts array-likes to arrays
     sample_counts = np.asarray(sample_counts)
-
     # Determines the number of groups
     num_groups = len(samples)
     num_samps = len(sample_counts)
@@ -969,7 +1237,6 @@ def _calculate_power_curve(test, samples, sample_counts, ratio=None,
         vec = False
         num_crit = alpha.shape[0]
         pwr = np.zeros((num_crit, num_samps))
-
     # Checks the ratio argument
     if ratio is None:
         ratio = np.ones((num_groups))
@@ -984,6 +1251,7 @@ def _calculate_power_curve(test, samples, sample_counts, ratio=None,
             ps = _compare_distributions(test=test,
                                         samples=samples,
                                         counts=count,
+                                        num_p=1,
                                         num_iter=num_iter,
                                         mode=mode)
             if vec:
diff --git a/skbio/stats/spatial.py b/skbio/stats/spatial.py
index 903847e..9cfcb2a 100644
--- a/skbio/stats/spatial.py
+++ b/skbio/stats/spatial.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 r"""
 Spatial Statistics (:mod:`skbio.stats.spatial`)
 ===============================================
@@ -16,7 +15,6 @@ Functions
    procrustes
 
 """
-from __future__ import absolute_import, division, print_function
 
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
@@ -26,9 +24,15 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 import numpy as np
 
+from skbio.util._decorator import deprecated
+
 
+ at deprecated(as_of="0.4.0", until="0.4.1",
+            reason="You should now use scipy.spatial.procrustes.")
 def procrustes(data1, data2):
     r"""Procrustes analysis, a similarity test for two data sets
 
diff --git a/skbio/stats/tests/__init__.py b/skbio/stats/tests/__init__.py
index c99682c..3fe3dc6 100644
--- a/skbio/stats/tests/__init__.py
+++ b/skbio/stats/tests/__init__.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -7,3 +5,5 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
diff --git a/skbio/stats/tests/test_composition.py b/skbio/stats/tests/test_composition.py
new file mode 100644
index 0000000..62fa3fc
--- /dev/null
+++ b/skbio/stats/tests/test_composition.py
@@ -0,0 +1,222 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+
+from unittest import TestCase, main
+import numpy as np
+import numpy.testing as npt
+from skbio.stats.composition import (closure, multiplicative_replacement,
+                                     perturb, perturb_inv, power,
+                                     clr, centralize)
+
+
+class CompositionTests(TestCase):
+
+    def setUp(self):
+        self.data1 = np.array([[2, 2, 6],
+                               [4, 4, 2]])
+        self.data2 = np.array([2, 2, 6])
+
+        self.data3 = np.array([[1, 2, 3, 0, 5],
+                               [1, 0, 0, 4, 5],
+                               [1, 2, 3, 4, 5]])
+        self.data4 = np.array([1, 2, 3, 0, 5])
+        self.data5 = [[2, 2, 6], [4, 4, 2]]
+        self.data6 = [[1, 2, 3, 0, 5],
+                      [1, 0, 0, 4, 5],
+                      [1, 2, 3, 4, 5]]
+        # Bad datasets
+        self.bad1 = np.array([1, 2, -1])
+        self.bad2 = np.array([[[1, 2, 3, 0, 5]]])
+
+    def test_closure(self):
+
+        npt.assert_allclose(closure(self.data1),
+                            np.array([[.2, .2, .6],
+                                      [.4, .4, .2]]))
+        npt.assert_allclose(closure(self.data2),
+                            np.array([.2, .2, .6]))
+        npt.assert_allclose(closure(self.data5),
+                            np.array([[.2, .2, .6],
+                                      [.4, .4, .2]]))
+        with self.assertRaises(ValueError):
+            closure(self.bad1)
+
+        with self.assertRaises(ValueError):
+            closure(self.bad2)
+
+        # make sure that inplace modification is not occurring
+        closure(self.data2)
+        npt.assert_allclose(self.data2, np.array([2, 2, 6]))
+
+    def test_perturb(self):
+        pmat = perturb(closure(self.data1),
+                       closure(np.array([1, 1, 1])))
+        npt.assert_allclose(pmat,
+                            np.array([[.2, .2, .6],
+                                      [.4, .4, .2]]))
+
+        pmat = perturb(closure(self.data1),
+                       closure(np.array([10, 10, 20])))
+        npt.assert_allclose(pmat,
+                            np.array([[.125, .125, .75],
+                                      [1./3, 1./3, 1./3]]))
+
+        pmat = perturb(closure(self.data1),
+                       closure(np.array([10, 10, 20])))
+        npt.assert_allclose(pmat,
+                            np.array([[.125, .125, .75],
+                                      [1./3, 1./3, 1./3]]))
+
+        pmat = perturb(closure(self.data2),
+                       closure([1, 2, 1]))
+        npt.assert_allclose(pmat, np.array([1./6, 2./6, 3./6]))
+
+        pmat = perturb(closure(self.data5),
+                       closure(np.array([1, 1, 1])))
+        npt.assert_allclose(pmat,
+                            np.array([[.2, .2, .6],
+                                      [.4, .4, .2]]))
+
+        with self.assertRaises(ValueError):
+            perturb(closure(self.data5), self.bad1)
+
+        # make sure that inplace modification is not occurring
+        perturb(self.data2, [1, 2, 3])
+        npt.assert_allclose(self.data2, np.array([2, 2, 6]))
+
+    def test_power(self):
+        pmat = power(closure(self.data1), 2)
+        npt.assert_allclose(pmat,
+                            np.array([[.04/.44, .04/.44, .36/.44],
+                                      [.16/.36, .16/.36, .04/.36]]))
+
+        pmat = power(closure(self.data2), 2)
+        npt.assert_allclose(pmat, np.array([.04, .04, .36])/.44)
+
+        pmat = power(closure(self.data5), 2)
+        npt.assert_allclose(pmat,
+                            np.array([[.04/.44, .04/.44, .36/.44],
+                                      [.16/.36, .16/.36, .04/.36]]))
+
+        with self.assertRaises(ValueError):
+            power(self.bad1, 2)
+
+        # make sure that inplace modification is not occurring
+        power(self.data2, 4)
+        npt.assert_allclose(self.data2, np.array([2, 2, 6]))
+
+    def test_perturb_inv(self):
+        pmat = perturb_inv(closure(self.data1),
+                           closure([.1, .1, .1]))
+        imat = perturb(closure(self.data1),
+                       closure([10, 10, 10]))
+        npt.assert_allclose(pmat, imat)
+        pmat = perturb_inv(closure(self.data1),
+                           closure([1, 1, 1]))
+        npt.assert_allclose(pmat,
+                            closure([[.2, .2, .6],
+                                     [.4, .4, .2]]))
+        pmat = perturb_inv(closure(self.data5),
+                           closure([.1, .1, .1]))
+        imat = perturb(closure(self.data1), closure([10, 10, 10]))
+        npt.assert_allclose(pmat, imat)
+
+        with self.assertRaises(ValueError):
+            perturb_inv(closure(self.data1), self.bad1)
+
+        # make sure that inplace modification is not occurring
+        perturb_inv(self.data2, [1, 2, 3])
+        npt.assert_allclose(self.data2, np.array([2, 2, 6]))
+
+    def test_multiplicative_replacement(self):
+        amat = multiplicative_replacement(closure(self.data3))
+        npt.assert_allclose(amat,
+                            np.array([[0.087273, 0.174545, 0.261818,
+                                       0.04, 0.436364],
+                                      [0.092, 0.04, 0.04, 0.368, 0.46],
+                                      [0.066667, 0.133333, 0.2,
+                                       0.266667, 0.333333]]),
+                            rtol=1e-5, atol=1e-5)
+
+        amat = multiplicative_replacement(closure(self.data4))
+        npt.assert_allclose(amat,
+                            np.array([0.087273, 0.174545, 0.261818,
+                                      0.04, 0.436364]),
+                            rtol=1e-5, atol=1e-5)
+
+        amat = multiplicative_replacement(closure(self.data6))
+        npt.assert_allclose(amat,
+                            np.array([[0.087273, 0.174545, 0.261818,
+                                       0.04, 0.436364],
+                                      [0.092, 0.04, 0.04, 0.368, 0.46],
+                                      [0.066667, 0.133333, 0.2,
+                                       0.266667, 0.333333]]),
+                            rtol=1e-5, atol=1e-5)
+
+        with self.assertRaises(ValueError):
+            multiplicative_replacement(self.bad1)
+        with self.assertRaises(ValueError):
+            multiplicative_replacement(self.bad2)
+
+        # make sure that inplace modification is not occurring
+        multiplicative_replacement(self.data4)
+        npt.assert_allclose(self.data4, np.array([1, 2, 3, 0, 5]))
+
+    def test_clr(self):
+        cmat = clr(closure(self.data1))
+        A = np.array([.2, .2, .6])
+        B = np.array([.4, .4, .2])
+
+        npt.assert_allclose(cmat,
+                            [np.log(A / np.exp(np.log(A).mean())),
+                             np.log(B / np.exp(np.log(B).mean()))])
+        cmat = clr(closure(self.data2))
+        A = np.array([.2, .2, .6])
+        npt.assert_allclose(cmat,
+                            np.log(A / np.exp(np.log(A).mean())))
+
+        cmat = clr(closure(self.data5))
+        A = np.array([.2, .2, .6])
+        B = np.array([.4, .4, .2])
+
+        npt.assert_allclose(cmat,
+                            [np.log(A / np.exp(np.log(A).mean())),
+                             np.log(B / np.exp(np.log(B).mean()))])
+        with self.assertRaises(ValueError):
+            clr(self.bad1)
+        with self.assertRaises(ValueError):
+            clr(self.bad2)
+
+        # make sure that inplace modification is not occurring
+        clr(self.data2)
+        npt.assert_allclose(self.data2, np.array([2, 2, 6]))
+
+    def test_centralize(self):
+        cmat = centralize(closure(self.data1))
+        npt.assert_allclose(cmat,
+                            np.array([[0.22474487, 0.22474487, 0.55051026],
+                                      [0.41523958, 0.41523958, 0.16952085]]))
+        cmat = centralize(closure(self.data5))
+        npt.assert_allclose(cmat,
+                            np.array([[0.22474487, 0.22474487, 0.55051026],
+                                      [0.41523958, 0.41523958, 0.16952085]]))
+
+        with self.assertRaises(ValueError):
+            centralize(self.bad1)
+        with self.assertRaises(ValueError):
+            centralize(self.bad2)
+
+        centralize(self.data1)
+        npt.assert_allclose(self.data1,
+                            np.array([[2, 2, 6],
+                                      [4, 4, 2]]))
+
+if __name__ == "__main__":
+    main()
diff --git a/skbio/stats/tests/test_gradient.py b/skbio/stats/tests/test_gradient.py
index 3e142c1..1515e90 100644
--- a/skbio/stats/tests/test_gradient.py
+++ b/skbio/stats/tests/test_gradient.py
@@ -1,12 +1,11 @@
-#!/usr/bin/env python
-
-# -----------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
 # Distributed under the terms of the Modified BSD License.
 #
 # The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
+
 from __future__ import absolute_import, division, print_function
 from six import StringIO
 from future.builtins import zip
@@ -19,7 +18,7 @@ import pandas as pd
 import numpy.testing as npt
 import pandas.util.testing as pdt
 
-from skbio.util import get_data_path
+from skbio.util import get_data_path, assert_data_frame_almost_equal
 from skbio.stats.gradient import (GradientANOVA, AverageGradientANOVA,
                                   TrajectoryGradientANOVA,
                                   FirstDifferenceGradientANOVA,
@@ -243,7 +242,7 @@ class GradientTests(BaseTests):
                                       's8': np.array([20.3428571428])},
                                      orient='index')
         obs = _weight_by_vector(trajectory, w_vector)
-        pdt.assert_frame_equal(obs.sort(axis=0), exp.sort(axis=0))
+        assert_data_frame_almost_equal(obs.sort(axis=0), exp.sort(axis=0))
 
         trajectory = pd.DataFrame.from_dict({'s1': np.array([1]),
                                              's2': np.array([2]),
@@ -265,7 +264,7 @@ class GradientTests(BaseTests):
                                       },
                                      orient='index')
         obs = _weight_by_vector(trajectory, w_vector)
-        pdt.assert_frame_equal(obs.sort(axis=0), exp.sort(axis=0))
+        assert_data_frame_almost_equal(obs.sort(axis=0), exp.sort(axis=0))
 
         trajectory = pd.DataFrame.from_dict({'s2': np.array([2]),
                                              's3': np.array([3]),
@@ -280,7 +279,7 @@ class GradientTests(BaseTests):
                                       's4': np.array([4]), 's5': np.array([5]),
                                       's6': np.array([6])}, orient='index')
         obs = _weight_by_vector(trajectory, w_vector)
-        pdt.assert_frame_equal(obs.sort(axis=0), exp.sort(axis=0))
+        assert_data_frame_almost_equal(obs.sort(axis=0), exp.sort(axis=0))
 
         trajectory = pd.DataFrame.from_dict({'s1': np.array([1, 2, 3]),
                                              's2': np.array([2, 3, 4]),
@@ -294,9 +293,9 @@ class GradientTests(BaseTests):
                                       's2': np.array([2, 3, 4]),
                                       's3': np.array([5, 6, 7]),
                                       's4': np.array([8, 9, 10])},
-                                     orient='index')
+                                     orient='index').astype(np.float64)
         obs = _weight_by_vector(trajectory, w_vector)
-        pdt.assert_frame_equal(obs.sort(axis=0), exp.sort(axis=0))
+        assert_data_frame_almost_equal(obs.sort(axis=0), exp.sort(axis=0))
 
         sample_ids = ['PC.356', 'PC.481', 'PC.355', 'PC.593', 'PC.354']
         trajectory = pd.DataFrame.from_dict({'PC.356': np.array([5.65948525,
@@ -335,7 +334,7 @@ class GradientTests(BaseTests):
                                       }, orient='index')
         obs = _weight_by_vector(trajectory.ix[sample_ids],
                                 w_vector[sample_ids])
-        pdt.assert_frame_equal(obs.sort(axis=0), exp.sort(axis=0))
+        assert_data_frame_almost_equal(obs.sort(axis=0), exp.sort(axis=0))
 
     def test_weight_by_vector_single_element(self):
         trajectory = pd.DataFrame.from_dict({'s1': np.array([42])},
@@ -343,7 +342,7 @@ class GradientTests(BaseTests):
         w_vector = pd.Series(np.array([5]), ['s1']).astype(np.float64)
 
         obs = _weight_by_vector(trajectory, w_vector)
-        pdt.assert_frame_equal(obs, trajectory)
+        assert_data_frame_almost_equal(obs, trajectory)
 
     def test_weight_by_vector_error(self):
         """Raises an error with erroneous inputs"""
@@ -477,11 +476,11 @@ class GradientANOVATests(BaseTests):
         # Test with weighted = False
         bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
 
-        pdt.assert_frame_equal(bv._coords, self.coords_3axes)
+        assert_data_frame_almost_equal(bv._coords, self.coords_3axes)
         exp_prop_expl = np.array([25.6216900347, 15.7715955926,
                                   14.1215046787])
         npt.assert_equal(bv._prop_expl, exp_prop_expl)
-        pdt.assert_frame_equal(bv._metadata_map, self.metadata_map)
+        assert_data_frame_almost_equal(bv._metadata_map, self.metadata_map)
         self.assertTrue(bv._weighting_vector is None)
         self.assertFalse(bv._weighted)
 
@@ -489,13 +488,13 @@ class GradientANOVATests(BaseTests):
         bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
                            sort_category='Weight', weighted=True)
 
-        pdt.assert_frame_equal(bv._coords, self.coords_3axes)
+        assert_data_frame_almost_equal(bv._coords, self.coords_3axes)
         npt.assert_equal(bv._prop_expl, exp_prop_expl)
-        pdt.assert_frame_equal(bv._metadata_map, self.metadata_map)
+        assert_data_frame_almost_equal(bv._metadata_map, self.metadata_map)
         exp_weighting_vector = pd.Series(
             np.array([60, 55, 50, 52, 57, 65, 68, 70, 72]),
             ['PC.354', 'PC.355', 'PC.356', 'PC.481', 'PC.593', 'PC.607',
-             'PC.634', 'PC.635', 'PC.636']
+             'PC.634', 'PC.635', 'PC.636'], name='Weight'
             ).astype(np.float64)
         pdt.assert_series_equal(bv._weighting_vector, exp_weighting_vector)
         self.assertTrue(bv._weighted)
@@ -578,17 +577,21 @@ class GradientANOVATests(BaseTests):
 
         # Takes a subset from metadata_map
         bv = GradientANOVA(subset_coords, self.prop_expl, self.metadata_map)
-        pdt.assert_frame_equal(bv._coords.sort(axis=0),
-                               subset_coords.sort(axis=0))
-        pdt.assert_frame_equal(bv._metadata_map.sort(axis=0),
-                               subset_metadata_map.sort(axis=0))
+        assert_data_frame_almost_equal(
+            bv._coords.sort(axis=0),
+            subset_coords.sort(axis=0))
+        assert_data_frame_almost_equal(
+            bv._metadata_map.sort(axis=0),
+            subset_metadata_map.sort(axis=0))
 
         # Takes a subset from coords
         bv = GradientANOVA(self.coords, self.prop_expl, subset_metadata_map)
-        pdt.assert_frame_equal(bv._coords.sort(axis=0),
-                               subset_coords.sort(axis=0))
-        pdt.assert_frame_equal(bv._metadata_map.sort(axis=0),
-                               subset_metadata_map.sort(axis=0))
+        assert_data_frame_almost_equal(
+            bv._coords.sort(axis=0),
+            subset_coords.sort(axis=0))
+        assert_data_frame_almost_equal(
+            bv._metadata_map.sort(axis=0),
+            subset_metadata_map.sort(axis=0))
 
         # Takes a subset from metadata_map and coords at the same time
         coord_data = {
@@ -621,16 +624,18 @@ class GradientANOVATests(BaseTests):
             {'PC.355': np.array([0.236467470907, 0.21863434374,
                                  -0.0301637746424])},
             orient='index')
-        pdt.assert_frame_equal(bv._coords.sort(axis=0),
-                               exp_coords.sort(axis=0))
+        assert_data_frame_almost_equal(
+            bv._coords.sort(axis=0),
+            exp_coords.sort(axis=0))
         exp_metadata_map = pd.DataFrame.from_dict(
             {'PC.355': {'Treatment': 'Control',
                         'DOB': '20061218',
                         'Weight': '55',
                         'Description': 'Control_mouse_I.D._355'}},
             orient='index')
-        pdt.assert_frame_equal(bv._metadata_map.sort(axis=0),
-                               exp_metadata_map.sort(axis=0))
+        assert_data_frame_almost_equal(
+            bv._metadata_map.sort(axis=0),
+            exp_metadata_map.sort(axis=0))
 
     def test_normalize_samples_error(self):
         """Raises an error if coords and metadata_map does not have samples in
diff --git a/skbio/stats/tests/test_misc.py b/skbio/stats/tests/test_misc.py
index e00f24f..c94239e 100644
--- a/skbio/stats/tests/test_misc.py
+++ b/skbio/stats/tests/test_misc.py
@@ -9,50 +9,9 @@
 from __future__ import absolute_import, division, print_function
 from unittest import TestCase, main
 
-import numpy as np
-
-from skbio.stats import p_value_to_str
 from skbio.stats._misc import _pprint_strs
 
 
-class PValueToStrTests(TestCase):
-    def setUp(self):
-        self.p_value = 0.119123123123
-
-    def test_valid_input(self):
-        obs = p_value_to_str(self.p_value, 100)
-        self.assertEqual(obs, '0.12')
-
-        obs = p_value_to_str(self.p_value, 250)
-        self.assertEqual(obs, '0.12')
-
-        obs = p_value_to_str(self.p_value, 1000)
-        self.assertEqual(obs, '0.119')
-
-        obs = p_value_to_str(0.0055623489, 999)
-        self.assertEqual(obs, '0.006')
-
-    def test_too_few_permutations(self):
-        obs = p_value_to_str(self.p_value, 9)
-        self.assertEqual(obs, 'Too few permutations to compute p-value '
-                              '(permutations = 9)')
-
-        obs = p_value_to_str(self.p_value, 1)
-        self.assertEqual(obs, 'Too few permutations to compute p-value '
-                              '(permutations = 1)')
-
-        obs = p_value_to_str(self.p_value, 0)
-        self.assertEqual(obs, 'Too few permutations to compute p-value '
-                              '(permutations = 0)')
-
-    def test_missing_or_invalid_p_value(self):
-        obs = p_value_to_str(None, 0)
-        self.assertEqual(obs, 'N/A')
-
-        obs = p_value_to_str(np.nan, 0)
-        self.assertEqual(obs, 'N/A')
-
-
 class PPrintStrsTests(TestCase):
     def test_truncation(self):
         # truncation between items (on comma)
diff --git a/skbio/stats/tests/test_power.py b/skbio/stats/tests/test_power.py
index a332d79..dcad706 100644
--- a/skbio/stats/tests/test_power.py
+++ b/skbio/stats/tests/test_power.py
@@ -1,4 +1,3 @@
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -6,7 +5,9 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
+
 from __future__ import absolute_import, division, print_function
+
 from unittest import TestCase, main
 
 import numpy as np
@@ -16,24 +17,43 @@ from scipy.stats import kruskal
 
 from skbio.stats.power import (subsample_power,
                                subsample_paired_power,
-                               _check_strs,
+                               _check_nans,
                                confidence_bound,
                                _calculate_power,
                                _compare_distributions,
                                _calculate_power_curve,
+                               _check_subsample_power_inputs,
+                               _identify_sample_groups,
+                               _draw_paired_samples,
+                               _get_min_size,
                                bootstrap_power_curve,
-                               paired_subsamples)
+                               paired_subsamples
+                               )
 
 
 class PowerAnalysisTest(TestCase):
 
     def setUp(self):
-        # Defines a testing function
+
+        # Defines the testing functions
         def test_meta(ids, meta, cat, div):
             """Checks thhe div metric with a kruskal wallis"""
             out = [meta.loc[id_, div] for id_ in ids]
             return kruskal(*out)[1]
+
+        def meta_f(x):
+            """Applies `test_meta` to a result"""
+            return test_meta(x, self.meta, 'INT', 'DIV')
+
+        def f(x):
+            """returns the p value of a kruskal wallis test"""
+            return kruskal(*x)[1]
+
         self.test_meta = test_meta
+        self.f = f
+        self.meta_f = meta_f
+        self.num_p = 1
+
         # Sets the random seed
         np.random.seed(5)
         # Sets up the distributions of data for use
@@ -46,8 +66,6 @@ class PowerAnalysisTest(TestCase):
         self.alpha = np.power(10, np.array([-1, -1.301, -2, -3])).round(3)
         # Sets up a vector of samples
         self.num_samps = np.arange(10, 100, 10)
-        # Sets up the test function, a rank-sum test
-        self.f = lambda x: kruskal(*x)[1]
         # Sets up a mapping file
         meta = {'GW': {'INT': 'N', 'ABX': np.nan, 'DIV': 19.5, 'AGE': '30s',
                        'SEX': 'M'},
@@ -74,7 +92,9 @@ class PowerAnalysisTest(TestCase):
                 'NR': {'INT': 'Y', 'ABX': 'Y', 'DIV': 15.7, 'AGE': '20s',
                        'SEX': 'F'}}
         self.meta = pd.DataFrame.from_dict(meta, orient='index')
-        self.meta_f = lambda x: test_meta(x, self.meta, 'INT', 'DIV')
+        self.meta_pairs = {0: [['GW', 'SR', 'TS'], ['CB', 'LF', 'PC']],
+                           1: [['MM', 'PP', 'WM'], ['CD', 'MH', 'NR']]}
+        self.pair_index = np.array([0, 0, 0, 1, 1, 1])
         self.counts = np.array([5, 15, 25, 35, 45])
         self.powers = [np.array([[0.105, 0.137, 0.174, 0.208, 0.280],
                                  [0.115, 0.135, 0.196, 0.204, 0.281],
@@ -99,33 +119,13 @@ class PowerAnalysisTest(TestCase):
         self.cat = "AGE"
         self.control_cats = ['INT', 'ABX']
 
-    def test_subsample_power_matched_relationship_error(self):
-        with self.assertRaises(ValueError):
-            subsample_power(self.f,
-                            samples=[np.ones((2)), np.ones((5))],
-                            draw_mode="matched")
-
-    def test_subsample_power_min_observations_error(self):
-        with self.assertRaises(ValueError):
-            subsample_power(self.f,
-                            samples=[np.ones((2)), np.ones((5))])
-
-    def test_subsample_power_interval_error(self):
-        with self.assertRaises(ValueError):
-            subsample_power(self.f,
-                            samples=[np.ones((3)), np.ones((5))],
-                            min_observations=2,
-                            min_counts=5,
-                            counts_interval=1000,
-                            max_counts=7)
-
     def test_subsample_power_defaults(self):
         test_p, test_c = subsample_power(self.f, self.pop,
                                          num_iter=10, num_runs=5)
         self.assertEqual(test_p.shape, (5, 4))
         npt.assert_array_equal(np.array([10, 20, 30, 40]), test_c)
 
-    def test_subsample_power(self):
+    def test_subsample_power_counts(self):
         test_p, test_c = subsample_power(self.f,
                                          samples=self.pop,
                                          num_iter=10,
@@ -134,54 +134,86 @@ class PowerAnalysisTest(TestCase):
         self.assertEqual(test_p.shape, (2, 5))
         npt.assert_array_equal(np.arange(5, 50, 10), test_c)
 
-    def test_subsample_paired_power_min_observations_error(self):
-        with self.assertRaises(ValueError):
-            subsample_paired_power(self.f,
-                                   self.meta,
-                                   cat=self.cat,
-                                   control_cats=self.control_cats)
+    def test_subsample_power_matches(self):
+        test_p, test_c = subsample_power(self.f,
+                                         samples=self.pop,
+                                         num_iter=10,
+                                         num_runs=5,
+                                         draw_mode="matched")
+        self.assertEqual(test_p.shape, (5, 4))
+        npt.assert_array_equal(np.array([10, 20, 30, 40]), test_c)
 
-    def test_subsample_paired_power_interval_error(self):
-        with self.assertRaises(ValueError):
-            subsample_paired_power(self.f,
-                                   self.meta,
-                                   cat='INT',
-                                   control_cats=['SEX', 'AGE'],
-                                   min_observations=2,
-                                   counts_interval=12,
-                                   min_counts=5,
-                                   max_counts=7)
+    def test_subsample_power_multi_p(self):
+        test_p, test_c = subsample_power(lambda x: np.array([0.5, 0.5]),
+                                         samples=self.pop,
+                                         num_iter=10,
+                                         num_runs=5)
+        self.assertEqual(test_p.shape, (5, 4, 2))
+        npt.assert_array_equal(np.array([10, 20, 30, 40]), test_c)
 
     def test_subsample_paired_power(self):
-        known_c = np.array([1, 2, 3, 4, 5])
+        known_c = np.array([1, 2, 3, 4])
         # Sets up the handling values
         cat = 'INT'
         control_cats = ['SEX']
+
         # Tests for the control cats
         test_p, test_c = subsample_paired_power(self.meta_f,
                                                 meta=self.meta,
                                                 cat=cat,
                                                 control_cats=control_cats,
-                                                min_observations=1,
                                                 counts_interval=1,
                                                 num_iter=10,
                                                 num_runs=2)
         # Test the output shapes are sane
-        npt.assert_array_equal(test_p.shape, (2, 5))
+        self.assertEqual(test_p.shape, (2, 4))
         npt.assert_array_equal(known_c, test_c)
 
-    def test__check_strs_str(self):
-        self.assertTrue(_check_strs('string'))
+    def test_subsample_paired_power_multi_p(self):
+        def f(x):
+            return np.array([0.5, 0.5, 0.005])
+        cat = 'INT'
+        control_cats = ['SEX']
+        # Tests for the control cats
+        test_p, test_c = subsample_paired_power(f,
+                                                meta=self.meta,
+                                                cat=cat,
+                                                control_cats=control_cats,
+                                                counts_interval=1,
+                                                num_iter=10,
+                                                num_runs=2)
+        self.assertEqual(test_p.shape, (2, 4, 3))
+
+    def test_check_nans_str(self):
+        self.assertTrue(_check_nans('string'))
+
+    def test_check_nans_num(self):
+        self.assertTrue(_check_nans(4.2))
 
-    def test__check_strs_num(self):
-        self.assertTrue(_check_strs(4.2))
+    def test__check_nans_nan(self):
+        self.assertFalse(_check_nans(np.nan))
 
-    def test__check_str_nan(self):
-        self.assertFalse(_check_strs(np.nan))
+    def test__check_nans_clean_list(self):
+        self.assertTrue(_check_nans(['foo', 'bar'], switch=True))
+
+    def test__check_nans_list_nan(self):
+        self.assertFalse(_check_nans(['foo', np.nan], switch=True))
 
     def test__check_str_error(self):
         with self.assertRaises(TypeError):
-            _check_strs(self.f)
+            _check_nans(self.f)
+
+    def test__get_min_size_strict(self):
+        known = 5
+        test = _get_min_size(self.meta, 'INT', ['ABX', 'SEX'], ['Y', 'N'],
+                             True)
+        self.assertEqual(test, known)
+
+    def test__get_min_size_relaxed(self):
+        known = 5
+        test = _get_min_size(self.meta, 'INT', ['ABX', 'SEX'], ['Y', 'N'],
+                             False)
+        self.assertEqual(known, test)
 
     def test_confidence_bound_default(self):
         # Sets the know confidence bound
@@ -235,28 +267,21 @@ class PowerAnalysisTest(TestCase):
         # Checks the test value
         npt.assert_almost_equal(known, test)
 
-    def test__compare_distributions_mode_error(self):
-        with self.assertRaises(ValueError):
-            _compare_distributions(self.f, self.samps, mode='fig')
-
-    def test__compare_distributions_count_error(self):
-        with self.assertRaises(ValueError):
-            _compare_distributions(self.f, self.samps, counts=[1, 2, 3],
-                                   num_iter=100)
-
-    def test__compare_distributions_matched_length_error(self):
-        with self.assertRaises(ValueError):
-            _compare_distributions(self.f, [np.ones((5)), np.zeros((6))],
-                                   mode="matched")
+    def test__calculate_power_n(self):
+        crit = 0.025
+        known = np.array([0.5, 0.5])
+        alpha = np.vstack((self.alpha, self.alpha))
+        test = _calculate_power(alpha, crit)
+        npt.assert_almost_equal(known, test)
 
     def test__compare_distributions_sample_counts_error(self):
         with self.assertRaises(ValueError):
-            _compare_distributions(self.f, [self.pop[0][:5], self.pop[1]],
-                                   25)
+            _compare_distributions(self.f, [self.pop[0][:5], self.pop[1]], 1,
+                                   counts=25)
 
     def test__compare_distributions_all_mode(self):
         known = np.ones((100))*0.0026998
-        test = _compare_distributions(self.f, self.samps, num_iter=100)
+        test = _compare_distributions(self.f, self.samps, 1, num_iter=100)
         npt.assert_allclose(known, test, 5)
 
     def test__compare_distributions_matched_mode(self):
@@ -264,14 +289,72 @@ class PowerAnalysisTest(TestCase):
         known_mean = 0.162195
         known_std = 0.121887
         known_shape = (100,)
-        # Sets the sample value
         # Tests the sample value
-        test = _compare_distributions(self.f, self.pop, mode='matched',
-                                      num_iter=100)
+        test = _compare_distributions(self.f, self.pop, self.num_p,
+                                      mode='matched', num_iter=100)
         npt.assert_allclose(known_mean, test.mean(), rtol=0.1, atol=0.02)
         npt.assert_allclose(known_std, test.std(), rtol=0.1, atol=0.02)
         self.assertEqual(known_shape, test.shape)
 
+    def test__compare_distributions_draw_mode(self):
+        draw_mode = 'Ultron'
+        with self.assertRaises(ValueError):
+            _check_subsample_power_inputs(self.f, self.pop, draw_mode,
+                                          self.num_p)
+
+    def test__compare_distributions_multiple_returns(self):
+        known = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])
+
+        def f(x):
+            return np.array([1, 2, 3])
+
+        test = _compare_distributions(f, self.pop, 3, mode='matched',
+                                      num_iter=3)
+        npt.assert_array_equal(known, test)
+
+    def test_check_subsample_power_inputs_matched_mode(self):
+        with self.assertRaises(ValueError):
+            _check_subsample_power_inputs(self.f,
+                                          samples=[np.ones((2)), np.ones((5))],
+                                          draw_mode="matched")
+
+    def test_check_subsample_power_inputs_counts(self):
+        with self.assertRaises(ValueError):
+            _check_subsample_power_inputs(self.f,
+                                          samples=[np.ones((3)), np.ones((5))],
+                                          min_counts=5,
+                                          counts_interval=1000,
+                                          max_counts=7)
+
+    def test_check_subsample_power_inputs_ratio(self):
+        with self.assertRaises(ValueError):
+            _check_subsample_power_inputs(self.f,
+                                          self.samps,
+                                          ratio=np.array([1, 2, 3]))
+
+    def test_check_subsample_power_inputs_test(self):
+        # Defines a test function
+        def test(x):
+            return 'Hello World!'
+        with self.assertRaises(TypeError):
+            _check_subsample_power_inputs(test, self.samps)
+
+    def test_check_sample_power_inputs(self):
+        # Defines the know returns
+        known_num_p = 1
+        known_ratio = np.ones((2))
+        known_counts = np.arange(2, 10, 2)
+        # Runs the code for the returns
+        test_ratio, test_num_p, test_counts = \
+            _check_subsample_power_inputs(self.f,
+                                          self.samps,
+                                          counts_interval=2,
+                                          max_counts=10)
+        # Checks the returns are sane
+        self.assertEqual(known_num_p, test_num_p)
+        npt.assert_array_equal(known_ratio, test_ratio)
+        npt.assert_array_equal(known_counts, test_counts)
+
     def test__calculate_power_curve_ratio_error(self):
         with self.assertRaises(ValueError):
             _calculate_power_curve(self.f, self.pop, self.num_samps,
@@ -279,17 +362,16 @@ class PowerAnalysisTest(TestCase):
                                    num_iter=100)
 
     def test__calculate_power_curve_default(self):
-        # Sets the know output
+        # Sets the known output
         known = np.array([0.509, 0.822, 0.962, 0.997, 1.000, 1.000, 1.000,
-                          1.000,  1.000])
-
-        # Generates the test values.
+                          1.000, 1.000])
+        # Generates the test values
         test = _calculate_power_curve(self.f,
                                       self.pop,
                                       self.num_samps,
                                       num_iter=100)
         # Checks the samples returned sanely
-        npt.assert_allclose(test, known, rtol=0.1, atol=0.1)
+        npt.assert_allclose(test, known, rtol=0.1, atol=0.01)
 
     def test__calculate_power_curve_alpha(self):
         # Sets the know output
@@ -327,27 +409,28 @@ class PowerAnalysisTest(TestCase):
                                1.000, 1.000,  1.000])
         known_bound = np.array([0.03, 0.02, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00,
                                 0.00])
+
         # Generates the test values
         test_mean, test_bound = bootstrap_power_curve(self.f,
                                                       self.pop,
                                                       self.num_samps,
                                                       num_iter=100)
+
         # Checks the function returned sanely
         npt.assert_allclose(test_mean, known_mean, rtol=0.05, atol=0.05)
         npt.assert_allclose(test_bound, known_bound, rtol=0.1, atol=0.01)
 
     def test_paired_subsamples_default(self):
         # Sets the known np.array set
-        known_array = [sorted(['MM', 'SR', 'TS', 'GW', 'PP', 'WM']),
-                       sorted(['CD', 'LF', 'PC', 'CB', 'MH', 'NR'])]
+        known_array = [{'MM', 'SR', 'TS', 'GW', 'PP', 'WM'},
+                       {'CD', 'LF', 'PC', 'CB', 'MH', 'NR'}]
 
         # Gets the test value
         cat = 'INT'
         control_cats = ['SEX', 'AGE']
         test_array = paired_subsamples(self.meta, cat, control_cats)
-        test_array[0] = sorted(test_array[0])
-        test_array[1] = sorted(test_array[1])
-        npt.assert_array_equal(known_array, test_array)
+        self.assertEqual(known_array[0], set(test_array[0]))
+        self.assertEqual(known_array[1], set(test_array[1]))
 
     def test_paired_subsamples_break(self):
         # Sets known np.array set
@@ -376,23 +459,67 @@ class PowerAnalysisTest(TestCase):
         control_cats = ['ABX']
         test_array = paired_subsamples(self.meta, cat, control_cats,
                                        order=order)
-        for v in test_array[1]:
+        for v in test_array[0]:
             self.assertTrue(v in known_array)
         for v in test_array[1]:
             self.assertTrue(v in known_array)
 
     def test_paired_subsamples_not_strict(self):
-        known_array = [sorted(['WM', 'MM', 'GW', 'SR', 'TS']),
-                       sorted(['LF', 'PC', 'CB', 'NR', 'CD'])]
+        known_array = [{'WM', 'MM', 'GW', 'SR', 'TS'},
+                       {'LF', 'PC', 'CB', 'NR', 'CD'}]
 
         # Gets the test values
         cat = 'INT'
         control_cats = ['ABX', 'AGE']
         test_array = paired_subsamples(self.meta, cat, control_cats,
                                        strict_match=False)
-        test_array[0] = sorted(test_array[0])
-        test_array[1] = sorted(test_array[1])
-        npt.assert_array_equal(known_array, test_array)
+        self.assertEqual(set(test_array[0]), known_array[0])
+        self.assertEqual(set(test_array[1]), known_array[1])
+
+    def test__identify_sample_groups(self):
+        # Defines the know values
+        known_pairs = {0: [['MM'], ['CD']],
+                       1: [['SR'], ['LF']],
+                       2: [['TS'], ['PC']],
+                       3: [['GW'], ['CB']],
+                       4: [['PP'], ['MH']],
+                       5: [['WM'], ['NR']]}
+        known_index = np.array([0, 1, 2, 3, 4, 5])
+        test_pairs, test_index = _identify_sample_groups(self.meta,
+                                                         'INT',
+                                                         ['SEX', 'AGE'],
+                                                         order=['N', 'Y'],
+                                                         strict_match=True)
+        self.assertEqual(known_pairs.keys(), test_pairs.keys())
+        self.assertEqual(sorted(known_pairs.values()),
+                         sorted(test_pairs.values()))
+        npt.assert_array_equal(known_index, test_index)
+
+    def test__identify_sample_groups_not_strict(self):
+        # Defines the know values
+        known_pairs = {0: [['PP'], ['CD', 'NR']],
+                       1: [['MM', 'WM'], ['MH']],
+                       2: [['GW'], ['CB']]}
+        known_index = np.array([0, 1, 2])
+        test_pairs, test_index = _identify_sample_groups(self.meta,
+                                                         'INT',
+                                                         ['SEX', 'ABX'],
+                                                         order=['N', 'Y'],
+                                                         strict_match=False)
+        self.assertEqual(known_pairs.keys(), test_pairs.keys())
+        self.assertEqual(sorted(known_pairs.values()),
+                         sorted(test_pairs.values()))
+        npt.assert_array_equal(known_index, test_index)
+
+    def test__draw_paired_samples(self):
+        num_samps = 3
+        known_sets = [{'GW', 'SR', 'TS', 'MM', 'PP', 'WM'},
+                      {'CB', 'LF', 'PC', 'CD', 'MH', 'NR'}]
+        test_samps = _draw_paired_samples(self.meta_pairs, self.pair_index,
+                                          num_samps)
+        for i, t in enumerate(test_samps):
+            self.assertTrue(set(t).issubset(known_sets[i]))
+
 
 if __name__ == '__main__':
     main()
diff --git a/skbio/stats/tests/test_spatial.py b/skbio/stats/tests/test_spatial.py
index eb5c342..034bdd7 100644
--- a/skbio/stats/tests/test_spatial.py
+++ b/skbio/stats/tests/test_spatial.py
@@ -1,6 +1,3 @@
-#!/usr/bin/env python
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -9,6 +6,8 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 from unittest import TestCase, main
 
 import numpy as np
diff --git a/skbio/stats/tests/test_subsample.py b/skbio/stats/tests/test_subsample.py
index 24ba16c..0be6d15 100644
--- a/skbio/stats/tests/test_subsample.py
+++ b/skbio/stats/tests/test_subsample.py
@@ -19,7 +19,7 @@ import warnings
 import numpy as np
 import numpy.testing as npt
 
-from skbio.stats import isubsample, subsample
+from skbio.stats import isubsample
 
 
 cy_subsample = import_fresh_module('skbio.stats._subsample',
@@ -133,24 +133,6 @@ class CySubsampleCountsTests(SubsampleCountsTests, unittest.TestCase):
     module = cy_subsample
 
 
-class SubsampleTests(unittest.TestCase):
-    def test_deprecated_api(self):
-        # light test to make sure deprecated API exists; subsample_counts is
-        # more thoroughly tested
-        obs = npt.assert_warns(DeprecationWarning, subsample, [0, 5, 0], 5)
-        npt.assert_equal(obs, [0, 5, 0])
-
-        # replace=True
-        a = np.array([0, 0, 3, 4, 2, 1])
-        actual = set()
-        for i in range(1000):
-            obs = npt.assert_warns(DeprecationWarning, subsample, a, 10,
-                                   replace=True)
-            self.assertEqual(obs.sum(), 10)
-            actual.add(tuple(obs))
-        self.assertTrue(len(actual) > 1)
-
-
 class ISubsampleTests(unittest.TestCase):
     def setUp(self):
         np.random.seed(123)
diff --git a/skbio/parse/sequences/_exception.py b/skbio/test.py
similarity index 71%
rename from skbio/parse/sequences/_exception.py
rename to skbio/test.py
index 2258a93..6bc8059 100644
--- a/skbio/parse/sequences/_exception.py
+++ b/skbio/test.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -8,9 +6,16 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from skbio.io import FileFormatError
+from __future__ import absolute_import, division, print_function
+
+import sys
+
+from skbio.util import TestRunner
 
+test = TestRunner(__file__).test
 
-class FastqParseError(FileFormatError):
-    """Exception raised when a FASTQ formatted file cannot be parsed"""
-    pass
+if __name__ == '__main__':
+    if test():
+        sys.exit(0)
+    else:
+        sys.exit(1)
diff --git a/skbio/tests/__init__.py b/skbio/tests/__init__.py
index c99682c..3fe3dc6 100644
--- a/skbio/tests/__init__.py
+++ b/skbio/tests/__init__.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -7,3 +5,5 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
diff --git a/skbio/tests/test_workflow.py b/skbio/tests/test_workflow.py
index 5982b46..297486c 100644
--- a/skbio/tests/test_workflow.py
+++ b/skbio/tests/test_workflow.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -8,6 +6,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 from future.builtins import zip
 from collections import defaultdict
 from skbio.workflow import (Exists, NotExecuted, NotNone, Workflow, not_none,
diff --git a/skbio/tree/__init__.py b/skbio/tree/__init__.py
index 3afe453..875600c 100644
--- a/skbio/tree/__init__.py
+++ b/skbio/tree/__init__.py
@@ -52,6 +52,7 @@ Examples
 --------
 
 >>> from skbio import TreeNode
+>>> from io import StringIO
 
 A new tree can be constructed from a Newick string. Newick is a common format
 used to represent tree objects within a file. Newick was part of the original
@@ -67,12 +68,12 @@ tips A and B.
 
 Now let's construct a simple tree and dump an ASCII representation:
 
->>> tree = TreeNode.from_newick("((A, B)C, D)root;")
->>> print tree.is_root()  # is this the root of the tree?
+>>> tree = TreeNode.read(StringIO(u"((A, B)C, D)root;"))
+>>> print(tree.is_root()) # is this the root of the tree?
 True
->>> print tree.is_tip()  # is this node a tip?
+>>> print(tree.is_tip()) # is this node a tip?
 False
->>> print tree.ascii_art()
+>>> print(tree.ascii_art())
                     /-A
           /C-------|
 -root----|          \-B
@@ -91,7 +92,7 @@ The first traversal we'll cover is a preorder traversal in which you evaluate
 from root to tips, looking at the left most child first. For instance:
 
 >>> for node in tree.preorder():
-...    print node.name
+...    print(node.name)
 root
 C
 A
@@ -102,7 +103,7 @@ The next method we'll look at is a postorder traveral which will evaluate the
 left subtree tips first before walking back up the tree:
 
 >>> for node in tree.postorder():
-...    print node.name
+...    print(node.name)
 A
 B
 C
@@ -113,13 +114,13 @@ root
 or for iterating over just the internal nodes.
 
 >>> for node in tree.tips():
-...    print "Node name: %s, Is a tip: %s" % (node.name, node.is_tip())
+...    print("Node name: %s, Is a tip: %s" % (node.name, node.is_tip()))
 Node name: A, Is a tip: True
 Node name: B, Is a tip: True
 Node name: D, Is a tip: True
 
 >>> for node in tree.non_tips():
-...    print "Node name: %s, Is a tip: %s" % (node.name, node.is_tip())
+...    print("Node name: %s, Is a tip: %s" % (node.name, node.is_tip()))
 Node name: C, Is a tip: False
 
 Note, by default, `non_tips` will ignore `self` (which is the root in this
@@ -132,28 +133,30 @@ distance is the fraction of common clades present in the two trees, where a
 distance of 0 means the trees contain identical clades, and a distance of 1
 indicates the trees do not share any common clades:
 
->>> tree1 = TreeNode.from_newick("((A, B)C, (D, E)F, (G, H)I)root;")
->>> tree2 = TreeNode.from_newick("((G, H)C, (D, E)F, (B, A)I)root;")
->>> tree3 = TreeNode.from_newick("((D, B)C, (A, E)F, (G, H)I)root;")
->>> print tree1.compare_subsets(tree1)  # identity case
+>>> tree1 = TreeNode.read(StringIO(u"((A, B)C, (D, E)F, (G, H)I)root;"))
+>>> tree2 = TreeNode.read(StringIO(u"((G, H)C, (D, E)F, (B, A)I)root;"))
+>>> tree3 = TreeNode.read(StringIO(u"((D, B)C, (A, E)F, (G, H)I)root;"))
+>>> print(tree1.compare_subsets(tree1))  # identity case
 0.0
->>> print tree1.compare_subsets(tree2)  # same tree but different clade order
+>>> print(tree1.compare_subsets(tree2))  # same tree but different clade order
 0.0
->>> print tree1.compare_subsets(tree3)  # only 1 of 3 common subsets
+>>> print(tree1.compare_subsets(tree3))  # only 1 of 3 common subsets
 0.666666666667
 
 We can additionally take into account branch length when computing distances
 between trees. First, we're going to construct two new trees with described
 branch length, note the difference in the Newick strings:
 
->>> tree1 = TreeNode.from_newick("((A:0.1, B:0.2)C:0.3, D:0.4, E:0.5)root;")
->>> tree2 = TreeNode.from_newick("((A:0.4, B:0.8)C:0.3, D:0.1, E:0.5)root;")
+>>> tree1 = \
+...     TreeNode.read(StringIO(u"((A:0.1, B:0.2)C:0.3, D:0.4, E:0.5)root;"))
+>>> tree2 = \
+...     TreeNode.read(StringIO(u"((A:0.4, B:0.8)C:0.3, D:0.1, E:0.5)root;"))
 
 In these two trees, we've added on a description of length from the node to
 its parent, so for instance:
 
 >>> for node in tree1.postorder():
-...     print node.name, node.length
+...     print(node.name, node.length)
 A 0.1
 B 0.2
 C 0.3
@@ -165,9 +168,9 @@ Now let's compare two trees using the distances computed pairwise between tips
 in the trees. The distance computed, by default, is the correlation of all
 pairwise tip-to-tip distances between trees:
 
->>> print tree1.compare_tip_distances(tree1)  # identity case
+>>> print(tree1.compare_tip_distances(tree1))  # identity case
 0.0
->>> print tree1.compare_tip_distances(tree2)
+>>> print(tree1.compare_tip_distances(tree2))
 0.120492524415
 
 Prefix trees (i.e., tries) examples
@@ -235,16 +238,17 @@ Create a new trie with a list of sequences
 
 """
 
-
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, scikit-bio development team.
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
 #
 # Distributed under the terms of the Modified BSD License.
 #
 # The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
 
-from numpy.testing import Tester
+from skbio.util import TestRunner
 
 from ._tree import TreeNode
 from ._trie import CompressedTrie, fasta_to_pairlist
@@ -257,4 +261,4 @@ __all__ = ['TreeNode', 'CompressedTrie', 'fasta_to_pairlist', 'nj',
            'majority_rule', 'TreeError', 'NoLengthError', 'DuplicateNodeError',
            'MissingNodeError', 'NoParentError']
 
-test = Tester().test
+test = TestRunner(__file__).test
diff --git a/skbio/tree/_exception.py b/skbio/tree/_exception.py
index e8828e2..fcecbdd 100644
--- a/skbio/tree/_exception.py
+++ b/skbio/tree/_exception.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -8,6 +6,8 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 
 class TreeError(Exception):
     """General tree error"""
diff --git a/skbio/tree/_majority_rule.py b/skbio/tree/_majority_rule.py
index 81e185d..84942c8 100644
--- a/skbio/tree/_majority_rule.py
+++ b/skbio/tree/_majority_rule.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -8,12 +6,15 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 from collections import defaultdict
 from future.builtins import zip
 
 import numpy as np
 
 from skbio.tree import TreeNode
+from skbio.util._decorator import experimental
 
 
 def _walk_clades(trees, weights):
@@ -177,6 +178,7 @@ def _build_trees(clade_counts, edge_lengths, support_attr):
     return list(nodes.values())
 
 
+ at experimental(as_of="0.4.0")
 def majority_rule(trees, weights=None, cutoff=0.5, support_attr='support'):
     r"""Determines consensus trees from a list of rooted trees
 
@@ -227,16 +229,17 @@ def majority_rule(trees, weights=None, cutoff=0.5, support_attr='support'):
     rule extended.
 
     >>> from skbio.tree import TreeNode
+    >>> from io import StringIO
     >>> trees = [
-    ... TreeNode.from_newick("(A,(B,(H,(D,(J,(((G,E),(F,I)),C))))));"),
-    ... TreeNode.from_newick("(A,(B,(D,((J,H),(((G,E),(F,I)),C)))));"),
-    ... TreeNode.from_newick("(A,(B,(D,(H,(J,(((G,E),(F,I)),C))))));"),
-    ... TreeNode.from_newick("(A,(B,(E,(G,((F,I),((J,(H,D)),C))))));"),
-    ... TreeNode.from_newick("(A,(B,(E,(G,((F,I),(((J,H),D),C))))));"),
-    ... TreeNode.from_newick("(A,(B,(E,((F,I),(G,((J,(H,D)),C))))));"),
-    ... TreeNode.from_newick("(A,(B,(E,((F,I),(G,(((J,H),D),C))))));"),
-    ... TreeNode.from_newick("(A,(B,(E,((G,(F,I)),((J,(H,D)),C)))));"),
-    ... TreeNode.from_newick("(A,(B,(E,((G,(F,I)),(((J,H),D),C)))));")]
+    ... TreeNode.read(StringIO(u"(A,(B,(H,(D,(J,(((G,E),(F,I)),C))))));")),
+    ... TreeNode.read(StringIO(u"(A,(B,(D,((J,H),(((G,E),(F,I)),C)))));")),
+    ... TreeNode.read(StringIO(u"(A,(B,(D,(H,(J,(((G,E),(F,I)),C))))));")),
+    ... TreeNode.read(StringIO(u"(A,(B,(E,(G,((F,I),((J,(H,D)),C))))));")),
+    ... TreeNode.read(StringIO(u"(A,(B,(E,(G,((F,I),(((J,H),D),C))))));")),
+    ... TreeNode.read(StringIO(u"(A,(B,(E,((F,I),(G,((J,(H,D)),C))))));")),
+    ... TreeNode.read(StringIO(u"(A,(B,(E,((F,I),(G,(((J,H),D),C))))));")),
+    ... TreeNode.read(StringIO(u"(A,(B,(E,((G,(F,I)),((J,(H,D)),C)))));")),
+    ... TreeNode.read(StringIO(u"(A,(B,(E,((G,(F,I)),(((J,H),D),C)))));"))]
     >>> consensus = majority_rule(trees, cutoff=0.5)[0]
     >>> print(consensus.ascii_art())
                                   /-E
@@ -274,10 +277,10 @@ def majority_rule(trees, weights=None, cutoff=0.5, support_attr='support'):
     if not all tips are present across all trees.
 
     >>> trees = [
-    ...     TreeNode.from_newick("((a,b),(c,d),(e,f))"),
-    ...     TreeNode.from_newick("(a,(c,d),b,(e,f))"),
-    ...     TreeNode.from_newick("((c,d),(e,f),b)"),
-    ...     TreeNode.from_newick("(a,(c,d),(e,f))")]
+    ...     TreeNode.read(StringIO(u"((a,b),(c,d),(e,f));")),
+    ...     TreeNode.read(StringIO(u"(a,(c,d),b,(e,f));")),
+    ...     TreeNode.read(StringIO(u"((c,d),(e,f),b);")),
+    ...     TreeNode.read(StringIO(u"(a,(c,d),(e,f));"))]
     >>> consensus_trees = majority_rule(trees)
     >>> print(len(consensus_trees))
     4
diff --git a/skbio/tree/_nj.py b/skbio/tree/_nj.py
index eb8a3ba..dc4b603 100644
--- a/skbio/tree/_nj.py
+++ b/skbio/tree/_nj.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -8,13 +6,17 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 import numpy as np
-from six import StringIO
 
 from skbio.stats.distance import DistanceMatrix
 from skbio.tree import TreeNode
+from skbio.util._decorator import experimental
+from skbio.io._fileobject import StringIO
 
 
+ at experimental(as_of="0.4.0")
 def nj(dm, disallow_negative_branch_length=True, result_constructor=None):
     """ Apply neighbor joining for phylogenetic reconstruction.
 
diff --git a/skbio/tree/_tree.py b/skbio/tree/_tree.py
index 261bec8..13f2e41 100644
--- a/skbio/tree/_tree.py
+++ b/skbio/tree/_tree.py
@@ -8,8 +8,6 @@
 
 from __future__ import absolute_import, division, print_function
 
-import re
-import warnings
 from operator import or_
 from copy import deepcopy
 from itertools import combinations
@@ -19,13 +17,13 @@ from collections import defaultdict
 import numpy as np
 from scipy.stats import pearsonr
 from future.builtins import zip
-from six import StringIO
+import six
 
 from skbio._base import SkbioObject
 from skbio.stats.distance import DistanceMatrix
-from skbio.io import RecordError
 from ._exception import (NoLengthError, DuplicateNodeError, NoParentError,
                          MissingNodeError, TreeError)
+from skbio.util._decorator import experimental
 
 
 def distance_from_r(m1, m2):
@@ -84,6 +82,7 @@ class TreeNode(SkbioObject):
     _exclude_from_copy = set(['parent', 'children', '_tip_cache',
                               '_non_tip_cache'])
 
+    @experimental(as_of="0.4.0")
     def __init__(self, name=None, length=None, parent=None, children=None):
         self.name = name
         self.length = length
@@ -98,6 +97,7 @@ class TreeNode(SkbioObject):
         if children is not None:
             self.extend(children)
 
+    @experimental(as_of="0.4.0")
     def __repr__(self):
         r"""Returns summary of the tree
 
@@ -113,13 +113,11 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c, d)root;"))
+        >>> tree = TreeNode.read([u"((a,b)c, d)root;"])
         >>> repr(tree)
         '<TreeNode, name: root, internal node count: 1, tips count: 3>'
 
-        .. shownumpydoc
         """
         nodes = [n for n in self.traverse(include_self=False)]
         n_tips = sum([n.is_tip() for n in nodes])
@@ -130,6 +128,7 @@ class TreeNode(SkbioObject):
         return "<%s, name: %s, internal node count: %d, tips count: %d>" % \
                (classname, name, n_nontips, n_tips)
 
+    @experimental(as_of="0.4.0")
     def __str__(self):
         r"""Returns string version of self, with names and distances
 
@@ -145,32 +144,29 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c);"))
+        >>> tree = TreeNode.read([u"((a,b)c);"])
         >>> str(tree)
         '((a,b)c);\n'
 
-        .. shownumpydoc
         """
+        return str(''.join(self.write([])))
 
-        fh = StringIO()
-        self.write(fh)
-        string = fh.getvalue()
-        fh.close()
-        return string
-
+    @experimental(as_of="0.4.0")
     def __iter__(self):
         r"""Node iter iterates over the `children`."""
         return iter(self.children)
 
+    @experimental(as_of="0.4.0")
     def __len__(self):
         return len(self.children)
 
+    @experimental(as_of="0.4.0")
     def __getitem__(self, i):
         r"""Node delegates slicing to `children`."""
         return self.children[i]
 
+    @experimental(as_of="0.4.0")
     def _adopt(self, node):
         r"""Update `parent` references but does NOT update `children`."""
         self.invalidate_caches()
@@ -179,6 +175,7 @@ class TreeNode(SkbioObject):
         node.parent = self
         return node
 
+    @experimental(as_of="0.4.0")
     def append(self, node):
         r"""Appends a node to `children`, in-place, cleaning up refs
 
@@ -210,6 +207,7 @@ class TreeNode(SkbioObject):
         """
         self.children.append(self._adopt(node))
 
+    @experimental(as_of="0.4.0")
     def extend(self, nodes):
         r"""Append a `list` of `TreeNode` to `self`.
 
@@ -236,8 +234,9 @@ class TreeNode(SkbioObject):
         <BLANKLINE>
 
         """
-        self.children.extend([self._adopt(n) for n in nodes])
+        self.children.extend([self._adopt(n) for n in nodes[:]])
 
+    @experimental(as_of="0.4.0")
     def pop(self, index=-1):
         r"""Remove a `TreeNode` from `self`.
 
@@ -262,9 +261,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("(a,b)c;"))
+        >>> tree = TreeNode.read([u"(a,b)c;"])
         >>> print(tree.pop(0))
         a;
         <BLANKLINE>
@@ -279,6 +277,7 @@ class TreeNode(SkbioObject):
         node.parent = None
         return node
 
+    @experimental(as_of="0.4.0")
     def remove(self, node):
         r"""Remove a node from self
 
@@ -301,9 +300,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("(a,b)c;"))
+        >>> tree = TreeNode.read([u"(a,b)c;"])
         >>> tree.remove(tree.children[0])
         True
 
@@ -314,6 +312,7 @@ class TreeNode(SkbioObject):
                 return True
         return False
 
+    @experimental(as_of="0.4.0")
     def remove_deleted(self, func):
         r"""Delete nodes in which `func(node)` evaluates `True`.
 
@@ -332,9 +331,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("(a,b)c;"))
+        >>> tree = TreeNode.read([u"(a,b)c;"])
         >>> tree.remove_deleted(lambda x: x.name == 'b')
         >>> print(tree)
         (a)c;
@@ -344,6 +342,7 @@ class TreeNode(SkbioObject):
             if func(node):
                 node.parent.remove(node)
 
+    @experimental(as_of="0.4.0")
     def prune(self):
         r"""Reconstructs correct topology after nodes have been removed.
 
@@ -366,9 +365,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
+        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f)root;"])
         >>> to_delete = tree.find('b')
         >>> tree.remove_deleted(lambda x: x == to_delete)
         >>> print(tree)
@@ -399,6 +397,7 @@ class TreeNode(SkbioObject):
             node.parent.append(child)
             node.parent.remove(node)
 
+    @experimental(as_of="0.4.0")
     def shear(self, names):
         """Lop off tips until the tree just has the desired tip names.
 
@@ -426,12 +425,12 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> t = TreeNode.read(StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
+        >>> t = TreeNode.read([u'((H:1,G:1):2,(R:0.5,M:0.7):3);'])
         >>> sheared = t.shear(['G', 'M'])
-        >>> print(sheared.to_newick(with_distances=True))
+        >>> print(sheared)
         (G:3.0,M:3.7);
+        <BLANKLINE>
 
         """
         tcopy = self.deepcopy()
@@ -450,6 +449,7 @@ class TreeNode(SkbioObject):
 
         return tcopy
 
+    @experimental(as_of="0.4.0")
     def copy(self):
         r"""Returns a copy of self using an iterative approach
 
@@ -469,9 +469,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
+        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f)root;"])
         >>> tree_copy = tree.copy()
         >>> tree_nodes = set([id(n) for n in tree.traverse()])
         >>> tree_copy_nodes = set([id(n) for n in tree_copy.traverse()])
@@ -514,6 +513,7 @@ class TreeNode(SkbioObject):
     __copy__ = copy
     __deepcopy__ = deepcopy = copy
 
+    @experimental(as_of="0.4.0")
     def unrooted_deepcopy(self, parent=None):
         r"""Walks the tree unrooted-style and returns a new copy
 
@@ -541,9 +541,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,(b,c)d)e,(f,g)h)i;"))
+        >>> tree = TreeNode.read([u"((a,(b,c)d)e,(f,g)h)i;"])
         >>> new_tree = tree.find('d').unrooted_deepcopy()
         >>> print(new_tree)
         (b,c,(a,((f,g)h)e)d)root;
@@ -559,6 +558,7 @@ class TreeNode(SkbioObject):
         new_tree_self = new_tree.find_by_id(self.id)
         return new_tree_self.unrooted_copy(parent)
 
+    @experimental(as_of="0.4.0")
     def unrooted_copy(self, parent=None):
         r"""Walks the tree unrooted-style and returns a copy
 
@@ -588,9 +588,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,(b,c)d)e,(f,g)h)i;"))
+        >>> tree = TreeNode.read([u"((a,(b,c)d)e,(f,g)h)i;"])
         >>> new_tree = tree.find('d').unrooted_copy()
         >>> print(new_tree)
         (b,c,(a,((f,g)h)e)d)root;
@@ -622,6 +621,7 @@ class TreeNode(SkbioObject):
 
         return result
 
+    @experimental(as_of="0.4.0")
     def count(self, tips=False):
         """Get the count of nodes in the tree
 
@@ -637,9 +637,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,(b,c)d)e,(f,g)h)i;"))
+        >>> tree = TreeNode.read([u"((a,(b,c)d)e,(f,g)h)i;"])
         >>> print(tree.count())
         9
         >>> print(tree.count(tips=True))
@@ -651,10 +650,12 @@ class TreeNode(SkbioObject):
         else:
             return len(list(self.traverse(include_self=True)))
 
+    @experimental(as_of="0.4.0")
     def subtree(self, tip_list=None):
         r"""Make a copy of the subtree"""
         raise NotImplementedError()
 
+    @experimental(as_of="0.4.0")
     def subset(self):
         r"""Returns set of names that descend from specified node
 
@@ -672,14 +673,14 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,(b,c)d)e,(f,g)h)i;"))
+        >>> tree = TreeNode.read([u"((a,(b,c)d)e,(f,g)h)i;"])
         >>> sorted(tree.subset())
-        ['a', 'b', 'c', 'f', 'g']
+        [u'a', u'b', u'c', u'f', u'g']
         """
         return frozenset({i.name for i in self.tips()})
 
+    @experimental(as_of="0.4.0")
     def subsets(self):
         r"""Return all sets of names that come from self and its descendants
 
@@ -698,14 +699,13 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("(((a,b)c,(d,e)f)h)root;"))
+        >>> tree = TreeNode.read([u"(((a,b)c,(d,e)f)h)root;"])
         >>> for s in sorted(tree.subsets()):
         ...     print(sorted(s))
-        ['a', 'b']
-        ['d', 'e']
-        ['a', 'b', 'd', 'e']
+        [u'a', u'b']
+        [u'd', u'e']
+        [u'a', u'b', u'd', u'e']
         """
         sets = []
         for i in self.postorder(include_self=False):
@@ -718,6 +718,7 @@ class TreeNode(SkbioObject):
                 i.__leaf_set = leaf_set
         return frozenset(sets)
 
+    @experimental(as_of="0.4.0")
     def root_at(self, node):
         r"""Return a new tree rooted at the provided node.
 
@@ -746,15 +747,14 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("(((a,b)c,(d,e)f)g,h)i;"))
+        >>> tree = TreeNode.read([u"(((a,b)c,(d,e)f)g,h)i;"])
         >>> print(tree.root_at('c'))
         (a,b,((d,e)f,(h)g)c)root;
         <BLANKLINE>
 
         """
-        if isinstance(node, str):
+        if isinstance(node, six.string_types):
             node = self.find(node)
 
         if not node.children:
@@ -762,6 +762,7 @@ class TreeNode(SkbioObject):
                             repr(node.name))
         return node.unrooted_deepcopy()
 
+    @experimental(as_of="0.4.0")
     def root_at_midpoint(self):
         r"""Return a new tree rooted at midpoint of the two tips farthest apart
 
@@ -789,10 +790,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("(((d:1,e:1,(g:1)f:1)c:1)b:1,h:1)"
-        ...                               "a:1;"))
+        >>> tree = TreeNode.read([u"(((d:1,e:1,(g:1)f:1)c:1)b:1,h:1)a:1;"])
         >>> print(tree.root_at_midpoint())
         ((d:1.0,e:1.0,(g:1.0)f:1.0)c:0.5,((h:1.0)b:1.0):0.5)root;
         <BLANKLINE>
@@ -842,6 +841,7 @@ class TreeNode(SkbioObject):
 
             return new_root.unrooted_copy()
 
+    @experimental(as_of="0.4.0")
     def is_tip(self):
         r"""Returns `True` if the current node has no `children`.
 
@@ -857,9 +857,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c);"))
+        >>> tree = TreeNode.read([u"((a,b)c);"])
         >>> print(tree.is_tip())
         False
         >>> print(tree.find('a').is_tip())
@@ -868,6 +867,7 @@ class TreeNode(SkbioObject):
         """
         return not self.children
 
+    @experimental(as_of="0.4.0")
     def is_root(self):
         r"""Returns `True` if the current is a root, i.e. has no `parent`.
 
@@ -883,9 +883,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c);"))
+        >>> tree = TreeNode.read([u"((a,b)c);"])
         >>> print(tree.is_root())
         True
         >>> print(tree.find('a').is_root())
@@ -894,6 +893,7 @@ class TreeNode(SkbioObject):
         """
         return self.parent is None
 
+    @experimental(as_of="0.4.0")
     def has_children(self):
         r"""Returns `True` if the node has `children`.
 
@@ -909,9 +909,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c);"))
+        >>> tree = TreeNode.read([u"((a,b)c);"])
         >>> print(tree.has_children())
         True
         >>> print(tree.find('a').has_children())
@@ -920,6 +919,7 @@ class TreeNode(SkbioObject):
         """
         return not self.is_tip()
 
+    @experimental(as_of="0.4.0")
     def traverse(self, self_before=True, self_after=False, include_self=True):
         r"""Returns iterator over descendants
 
@@ -942,10 +942,10 @@ class TreeNode(SkbioObject):
         Note that if self is terminal, it will only be included once even if
         `self_before` and `self_after` are both `True`.
 
-        Returns
-        -------
-        GeneratorType
-            Yields successive `TreeNode` objects
+        Yields
+        ------
+        TreeNode
+            Traversed node.
 
         See Also
         --------
@@ -958,9 +958,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c);"))
+        >>> tree = TreeNode.read([u"((a,b)c);"])
         >>> for node in tree.traverse():
         ...     print(node.name)
         None
@@ -980,6 +979,7 @@ class TreeNode(SkbioObject):
             else:
                 return self.tips(include_self=include_self)
 
+    @experimental(as_of="0.4.0")
     def preorder(self, include_self=True):
         r"""Performs preorder iteration over tree
 
@@ -988,10 +988,10 @@ class TreeNode(SkbioObject):
         include_self : bool
             include the initial node if True
 
-        Returns
-        -------
-        GeneratorType
-            Yields successive `TreeNode` objects
+        Yields
+        ------
+        TreeNode
+            Traversed node.
 
         See Also
         --------
@@ -1004,9 +1004,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c);"))
+        >>> tree = TreeNode.read([u"((a,b)c);"])
         >>> for node in tree.preorder():
         ...     print(node.name)
         None
@@ -1023,6 +1022,7 @@ class TreeNode(SkbioObject):
             if curr.children:
                 stack.extend(curr.children[::-1])
 
+    @experimental(as_of="0.4.0")
     def postorder(self, include_self=True):
         r"""Performs postorder iteration over tree.
 
@@ -1035,10 +1035,10 @@ class TreeNode(SkbioObject):
         include_self : bool
             include the initial node if True
 
-        Returns
-        -------
-        GeneratorType
-            Yields successive `TreeNode` objects
+        Yields
+        ------
+        TreeNode
+            Traversed node.
 
         See Also
         --------
@@ -1051,9 +1051,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c);"))
+        >>> tree = TreeNode.read([u"((a,b)c);"])
         >>> for node in tree.postorder():
         ...     print(node.name)
         a
@@ -1095,6 +1094,7 @@ class TreeNode(SkbioObject):
                 child_index_stack.pop()
                 child_index_stack[-1] += 1
 
+    @experimental(as_of="0.4.0")
     def pre_and_postorder(self, include_self=True):
         r"""Performs iteration over tree, visiting node before and after
 
@@ -1103,10 +1103,10 @@ class TreeNode(SkbioObject):
         include_self : bool
             include the initial node if True
 
-        Returns
-        -------
-        GeneratorType
-            Yields successive `TreeNode` objects
+        Yields
+        ------
+        TreeNode
+            Traversed node.
 
         See Also
         --------
@@ -1119,9 +1119,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c);"))
+        >>> tree = TreeNode.read([u"((a,b)c);"])
         >>> for node in tree.pre_and_postorder():
         ...     print(node.name)
         None
@@ -1170,6 +1169,7 @@ class TreeNode(SkbioObject):
                 child_index_stack.pop()
                 child_index_stack[-1] += 1
 
+    @experimental(as_of="0.4.0")
     def levelorder(self, include_self=True):
         r"""Performs levelorder iteration over tree
 
@@ -1178,10 +1178,10 @@ class TreeNode(SkbioObject):
         include_self : bool
             include the initial node if True
 
-        Returns
-        -------
-        GeneratorType
-            Yields successive `TreeNode` objects
+        Yields
+        ------
+        TreeNode
+            Traversed node.
 
         See Also
         --------
@@ -1194,9 +1194,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f);"))
+        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f);"])
         >>> for node in tree.levelorder():
         ...     print(node.name)
         None
@@ -1216,6 +1215,7 @@ class TreeNode(SkbioObject):
             if curr.children:
                 queue.extend(curr.children)
 
+    @experimental(as_of="0.4.0")
     def tips(self, include_self=False):
         r"""Iterates over tips descended from `self`.
 
@@ -1227,10 +1227,10 @@ class TreeNode(SkbioObject):
         include_self : bool
             include the initial node if True
 
-        Returns
-        -------
-        GeneratorType
-            Yields successive `TreeNode` objects
+        Yields
+        ------
+        TreeNode
+            Traversed node.
 
         See Also
         --------
@@ -1243,9 +1243,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f);"))
+        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f);"])
         >>> for node in tree.tips():
         ...     print(node.name)
         a
@@ -1258,6 +1257,7 @@ class TreeNode(SkbioObject):
             if n.is_tip():
                 yield n
 
+    @experimental(as_of="0.4.0")
     def non_tips(self, include_self=False):
         r"""Iterates over nontips descended from self
 
@@ -1271,10 +1271,10 @@ class TreeNode(SkbioObject):
         include_self : bool
             include the initial node if True
 
-        Returns
-        -------
-        GeneratorType
-            Yields successive `TreeNode` objects
+        Yields
+        ------
+        TreeNode
+            Traversed node.
 
         See Also
         --------
@@ -1287,9 +1287,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f);"))
+        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f);"])
         >>> for node in tree.non_tips():
         ...     print(node.name)
         c
@@ -1300,6 +1299,7 @@ class TreeNode(SkbioObject):
             if not n.is_tip():
                 yield n
 
+    @experimental(as_of="0.4.0")
     def invalidate_caches(self, attr=True):
         r"""Delete lookup and attribute caches
 
@@ -1328,6 +1328,7 @@ class TreeNode(SkbioObject):
                         if hasattr(n, cache):
                             delattr(n, cache)
 
+    @experimental(as_of="0.4.0")
     def create_caches(self):
         r"""Construct an internal lookups to facilitate searching by name
 
@@ -1379,6 +1380,7 @@ class TreeNode(SkbioObject):
             self._tip_cache = tip_cache
             self._non_tip_cache = non_tip_cache
 
+    @experimental(as_of="0.4.0")
     def find_all(self, name):
         r"""Find all nodes that match `name`
 
@@ -1409,9 +1411,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio.tree import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)d,(f,g)c);"))
+        >>> tree = TreeNode.read([u"((a,b)c,(d,e)d,(f,g)c);"])
         >>> for node in tree.find_all('c'):
         ...     print(node.name, node.children[0].name, node.children[1].name)
         c a b
@@ -1441,6 +1442,7 @@ class TreeNode(SkbioObject):
         else:
             return nodes
 
+    @experimental(as_of="0.4.0")
     def find(self, name):
         r"""Find a node by `name`.
 
@@ -1478,9 +1480,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f);"))
+        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f);"])
         >>> print(tree.find('c').name)
         c
         """
@@ -1501,6 +1502,7 @@ class TreeNode(SkbioObject):
         else:
             return node
 
+    @experimental(as_of="0.4.0")
     def find_by_id(self, node_id):
         r"""Find a node by `id`.
 
@@ -1534,9 +1536,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f);"))
+        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f);"])
         >>> print(tree.find_by_id(2).name)
         d
 
@@ -1557,6 +1558,7 @@ class TreeNode(SkbioObject):
         else:
             return node
 
+    @experimental(as_of="0.4.0")
     def find_by_func(self, func):
         r"""Find all nodes given a function
 
@@ -1565,13 +1567,13 @@ class TreeNode(SkbioObject):
         Parameters
         ----------
         func : a function
-            A function that accepts a TreeNode and returns `True` or `Fals`,
+            A function that accepts a TreeNode and returns `True` or `False`,
             where `True` indicates the node is to be yielded
 
-        Returns
-        -------
-        GeneratorType
-            A generator that yields nodes
+        Yields
+        ------
+        TreeNode
+            Node found by `func`.
 
         See Also
         --------
@@ -1581,17 +1583,17 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f);"))
+        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f);"])
         >>> func = lambda x: x.parent == tree.find('c')
         >>> [n.name for n in tree.find_by_func(func)]
-        ['a', 'b']
+        [u'a', u'b']
         """
         for node in self.traverse(include_self=True):
             if func(node):
                 yield node
 
+    @experimental(as_of="0.4.0")
     def ancestors(self):
         r"""Returns all ancestors back to the root
 
@@ -1605,11 +1607,10 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
+        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f)root;"])
         >>> [node.name for node in tree.find('a').ancestors()]
-        ['c', 'root']
+        [u'c', u'root']
 
         """
         result = []
@@ -1620,6 +1621,7 @@ class TreeNode(SkbioObject):
 
         return result
 
+    @experimental(as_of="0.4.0")
     def root(self):
         r"""Returns root of the tree `self` is in
 
@@ -1630,9 +1632,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
+        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f)root;"])
         >>> tip_a = tree.find('a')
         >>> root = tip_a.root()
         >>> root == tree
@@ -1644,6 +1645,7 @@ class TreeNode(SkbioObject):
             curr = curr.parent
         return curr
 
+    @experimental(as_of="0.4.0")
     def siblings(self):
         r"""Returns all nodes that are `children` of `self` `parent`.
 
@@ -1660,12 +1662,11 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e,f)g)root;"))
+        >>> tree = TreeNode.read([u"((a,b)c,(d,e,f)g)root;"])
         >>> tip_e = tree.find('e')
         >>> [n.name for n in tip_e.siblings()]
-        ['d', 'f']
+        [u'd', u'f']
 
         """
         if self.is_root():
@@ -1676,6 +1677,7 @@ class TreeNode(SkbioObject):
 
         return result
 
+    @experimental(as_of="0.4.0")
     def neighbors(self, ignore=None):
         r"""Returns all nodes that are connected to self
 
@@ -1693,12 +1695,11 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
+        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f)root;"])
         >>> node_c = tree.find('c')
         >>> [n.name for n in node_c.neighbors()]
-        ['a', 'b', 'root']
+        [u'a', u'b', u'root']
 
         """
         nodes = [n for n in self.children + [self.parent] if n is not None]
@@ -1707,6 +1708,7 @@ class TreeNode(SkbioObject):
         else:
             return [n for n in nodes if n is not ignore]
 
+    @experimental(as_of="0.4.0")
     def lowest_common_ancestor(self, tipnames):
         r"""Lowest common ancestor for a list of tips
 
@@ -1727,9 +1729,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
+        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f)root;"])
         >>> nodes = [tree.find('a'), tree.find('b')]
         >>> lca = tree.lowest_common_ancestor(nodes)
         >>> print(lca.name)
@@ -1781,6 +1782,7 @@ class TreeNode(SkbioObject):
     lca = lowest_common_ancestor  # for convenience
 
     @classmethod
+    @experimental(as_of="0.4.0")
     def from_taxonomy(cls, lineage_map):
         """Construct a tree from a taxonomy
 
@@ -1853,25 +1855,6 @@ class TreeNode(SkbioObject):
 
         return root
 
-    @classmethod
-    def from_file(cls, tree_f):
-        """Load a tree from a file or file-like object
-
-        .. note:: Deprecated in scikit-bio 0.2.0-dev
-           ``from_file`` will be removed in scikit-bio 0.3.0. It is replaced
-           by ``read``, which is a more general method for deserializing
-           TreeNode instances. ``read`` supports multiple file formats,
-           automatic file format detection, etc. by taking advantage of
-           scikit-bio's I/O registry system. See :mod:`skbio.io` for more
-           details.
-
-        """
-        warnings.warn(
-            "TreeNode.from_file is deprecated and will be removed in "
-            "scikit-bio 0.3.0. Please update your code to use TreeNode.read.",
-            DeprecationWarning)
-        return cls.read(tree_f, format='newick')
-
     def _balanced_distance_to_tip(self):
         """Return the distance to tip from this node.
 
@@ -1892,6 +1875,7 @@ class TreeNode(SkbioObject):
         return distance
 
     @classmethod
+    @experimental(as_of="0.4.0")
     def from_linkage_matrix(cls, linkage_matrix, id_list):
         """Return tree from SciPy linkage matrix.
 
@@ -1941,199 +1925,7 @@ class TreeNode(SkbioObject):
 
         return node_lookup[-1]
 
-    @classmethod
-    def from_newick(cls, lines, unescape_name=True):
-        r"""Returns tree from the Clustal .dnd file format and equivalent
-
-        .. note:: Deprecated in scikit-bio 0.2.0-dev
-           ``from_newick`` will be removed in scikit-bio 0.3.0. It is replaced
-           by ``read``, which is a more general method for deserializing
-           TreeNode instances. ``read`` supports multiple file formats,
-           automatic file format detection, etc. by taking advantage of
-           scikit-bio's I/O registry system. See :mod:`skbio.io` for more
-           details.
-
-        The tree is made of `skbio.TreeNode` objects, with branch
-        lengths if specified by the format.
-
-        More information on the Newick format can be found here [1]. In brief,
-        the format uses parentheses to define nesting. For instance, a three
-        taxon tree can be represented with::
-
-            ((a,b),c);
-
-        Two possible ways to represent this tree drawing it out would be::
-
-               *
-              / \
-             *   \
-            / \   \
-            a b   c
-
-            a
-             \__|___ c
-             /
-            b
-
-        The Newick format allows for defining branch length as well, for
-        example::
-
-            ((a:0.1,b:0.2):0.3,c:0.4);
-
-        This structure has a the same topology as the first example but the
-        tree now contains more information about how similar or dissimilar
-        nodes are to their parents. In the above example, we can see that tip
-        `a` has a distance of 0.1 to its parent, and `b` has a distance of 0.2
-        to its parent. We can additionally see that the clade that encloses
-        tips `a` and `b` has a distance of 0.3 to its parent, or in this case,
-        the root.
-
-        Parameters
-        ----------
-        lines : a str, a list of str, or a file-like object
-            The input newick string to parse
-        unescape_names : bool
-            Remove extraneous quote marks around names. Sometimes other
-            programs are sensitive to the characters used in names, and it
-            is essential (at times) to quote node names for compatibility.
-
-        Returns
-        -------
-        TreeNode
-            The root of the parsed tree
-
-        Raises
-        ------
-        RecordError
-            The following three conditions will trigger a `RecordError`:
-                * Unbalanced number of left and right parentheses
-                * A malformed newick string. For instance, if a semicolon is
-                    embedded within the string as opposed to at the end.
-                * If a non-newick string is passed.
-
-        See Also
-        --------
-        to_newick
-
-        Examples
-        --------
-        >>> from skbio import TreeNode
-        >>> TreeNode.from_newick("((a,b)c,(d,e)f)root;")
-        <TreeNode, name: root, internal node count: 2, tips count: 4>
-        >>> from six import StringIO
-        >>> s = StringIO("((a,b),c);")
-        >>> TreeNode.from_newick(s)
-        <TreeNode, name: unnamed, internal node count: 1, tips count: 3>
-
-        References
-        ----------
-        [1] http://evolution.genetics.washington.edu/phylip/newicktree.html
-
-        """
-        warnings.warn(
-            "TreeNode.from_newick is deprecated and will be removed in "
-            "scikit-bio 0.3.0. Please update your code to use TreeNode.read.",
-            DeprecationWarning)
-
-        def _new_child(old_node):
-            """Returns new_node which has old_node as its parent."""
-            new_node = cls()
-            new_node.parent = old_node
-            if old_node is not None:
-                if new_node not in old_node.children:
-                    old_node.children.append(new_node)
-            return new_node
-
-        if isinstance(lines, str):
-            data = lines
-        else:
-            data = ''.join(lines)
-
-        # skip arb comment stuff if present: start at first paren
-        paren_index = data.find('(')
-        data = data[paren_index:]
-        left_count = data.count('(')
-        right_count = data.count(')')
-
-        if left_count != right_count:
-            raise RecordError("Found %s left parens but %s right parens." %
-                              (left_count, right_count))
-
-        curr_node = None
-        state = 'PreColon'
-        state1 = 'PreClosed'
-        last_token = None
-
-        for t in _dnd_tokenizer(data):
-            if t == ':':
-                # expecting branch length
-                state = 'PostColon'
-                # prevent state reset
-                last_token = t
-                continue
-            if t == ')' and last_token in ',(':
-                # node without name
-                new_node = _new_child(curr_node)
-                new_node.name = None
-                curr_node = new_node.parent
-                state1 = 'PostClosed'
-                last_token = t
-                continue
-            if t == ')':
-                # closing the current node
-                curr_node = curr_node.parent
-                state1 = 'PostClosed'
-                last_token = t
-                continue
-            if t == '(':
-                # opening a new node
-                curr_node = _new_child(curr_node)
-            elif t == ';':  # end of data
-                last_token = t
-                break
-            elif t == ',' and last_token in ',(':
-                # node without name
-                new_node = _new_child(curr_node)
-                new_node.name = None
-                curr_node = new_node.parent
-            elif t == ',':
-                # separator: next node adds to this node's parent
-                curr_node = curr_node.parent
-            elif state == 'PreColon' and state1 == 'PreClosed':
-                # data for the current node
-                new_node = _new_child(curr_node)
-                if unescape_name:
-                    if t.startswith("'") and t.endswith("'"):
-                        while t.startswith("'") and t.endswith("'"):
-                            t = t[1:-1]
-                    else:
-                        if '_' in t:
-                            t = t.replace('_', ' ')
-                new_node.name = t
-                curr_node = new_node
-            elif state == 'PreColon' and state1 == 'PostClosed':
-                if unescape_name:
-                    while t.startswith("'") and t.endswith("'"):
-                        t = t[1:-1]
-                curr_node.name = t
-            elif state == 'PostColon':
-                # length data for the current node
-                curr_node.length = float(t)
-            else:
-                # can't think of a reason to get here
-                raise RecordError("Incorrect PhyloNode state? %s" % t)
-            state = 'PreColon'  # get here for any non-colon token
-            state1 = 'PreClosed'
-            last_token = t
-
-        if curr_node is not None and curr_node.parent is not None:
-            raise RecordError("Didn't get back to root of tree. The newick "
-                              "string may be malformed.")
-
-        if curr_node is None:  # no data -- return empty node
-            return cls()
-        return curr_node  # this should be the root of the tree
-
+    @experimental(as_of="0.4.0")
     def to_taxonomy(self, allow_empty=False, filter_f=None):
         """Returns a taxonomy representation of self
 
@@ -2147,12 +1939,12 @@ class TreeNode(SkbioObject):
             first parameter, and a ``list`` that represents the lineage as the
             second parameter.
 
-        Returns
-        -------
-        generator
-            (tip, [lineage]) where tip corresponds to a tip in the tree and
-            the [lineage] is the expanded names from root to tip. Nones and
-            empty strings are omitted from the lineage
+        Yields
+        ------
+        tuple
+            ``(tip, [lineage])`` where ``tip`` corresponds to a tip in the tree
+            and ``[lineage]`` is the expanded names from root to tip. ``None``
+            and empty strings are omitted from the lineage.
 
         Notes
         -----
@@ -2217,6 +2009,7 @@ class TreeNode(SkbioObject):
                     lineage.append(node.name)
                     seen.add(node.id)
 
+    @experimental(as_of="0.4.0")
     def to_array(self, attrs=None):
         """Return an array representation of self
 
@@ -2246,9 +2039,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> t = TreeNode.read(StringIO('(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7);'))
+        >>> t = TreeNode.read([u'(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7);'])
         >>> res = t.to_array()
         >>> res.keys()
         ['child_index', 'length', 'name', 'id_index', 'id']
@@ -2276,7 +2068,7 @@ class TreeNode(SkbioObject):
         >>> res['id']
         array([0, 1, 2, 3, 4, 5, 6, 7])
         >>> res['name']
-        array(['a', 'b', 'c', 'd', 'x', 'y', 'z', None], dtype=object)
+        array([u'a', u'b', u'c', u'd', u'x', u'y', u'z', None], dtype=object)
 
         """
         if attrs is None:
@@ -2299,101 +2091,6 @@ class TreeNode(SkbioObject):
         results.update({attr: arr for (attr, dtype), arr in zip(attrs, tmp)})
         return results
 
-    def to_newick(self, with_distances=False, semicolon=True,
-                  escape_name=True):
-        r"""Return the newick string representation of this tree.
-
-        .. note:: Deprecated in scikit-bio 0.2.0-dev
-           ``to_newick`` will be removed in scikit-bio 0.3.0. It is replaced by
-           ``write``, which is a more general method for serializing TreeNode
-           instances. ``write`` supports multiple file formats by taking
-           advantage of scikit-bio's I/O registry system. See :mod:`skbio.io`
-           for more details.
-
-        Please see `TreeNode.from_newick` for a further description of the
-        Newick format.
-
-        Parameters
-        ----------
-        with_distances : bool
-            If true, include lengths between nodes
-        semicolon : bool
-            If true, terminate the tree string with a semicolon
-        escape_name : bool
-            If true, wrap node names that include []'"(),:;_ in single quotes
-
-        Returns
-        -------
-        str
-            A Newick string representation of the tree
-
-        See Also
-        --------
-        from_newick
-
-        Examples
-        --------
-        >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
-        >>> print(tree.to_newick())
-        ((a,b)c,(d,e)f)root;
-
-        """
-        warnings.warn(
-            "TreeNode.to_newick is deprecated and will be removed in "
-            "scikit-bio 0.3.0. Please update your code to use TreeNode.write.",
-            DeprecationWarning)
-        result = ['(']
-        nodes_stack = [[self, len(self.children)]]
-        node_count = 1
-
-        while nodes_stack:
-            node_count += 1
-            # check the top node, any children left unvisited?
-            top = nodes_stack[-1]
-            top_node, num_unvisited_children = top
-            if num_unvisited_children:  # has any child unvisited
-                top[1] -= 1  # decrease the #of children unvisited
-                next_child = top_node.children[-num_unvisited_children]
-                # pre-visit
-                if next_child.children:
-                    result.append('(')
-                nodes_stack.append([next_child, len(next_child.children)])
-            else:  # no unvisited children
-                nodes_stack.pop()
-                # post-visit
-                if top_node.children:
-                    result[-1] = ')'
-
-                if top_node.name is None:
-                    name = ''
-                else:
-                    name = str(top_node.name)
-                    if escape_name and not (name.startswith("'") and
-                                            name.endswith("'")):
-                        if re.search("""[]['"(),:;_]""", name):
-                            name = "'%s'" % name.replace("'", "''")
-                        else:
-                            name = name.replace(' ', '_')
-                result.append(name)
-
-                if with_distances and top_node.length is not None:
-                    result[-1] = "%s:%s" % (result[-1], top_node.length)
-
-                result.append(',')
-
-        if len(result) <= 3:  # single node with or without name
-            if semicolon:
-                return "%s;" % result[1]
-            else:
-                return result[1]
-        else:
-            if semicolon:
-                result[-1] = ';'
-            else:
-                result.pop(-1)
-            return ''.join(result)
-
     def _ascii_art(self, char1='-', show_internal=True, compact=False):
         LEN = 10
         PAD = ' ' * LEN
@@ -2429,6 +2126,7 @@ class TreeNode(SkbioObject):
         else:
             return ([char1 + '-' + namestr], 0)
 
+    @experimental(as_of="0.4.0")
     def ascii_art(self, show_internal=True, compact=False):
         r"""Returns a string containing an ascii drawing of the tree
 
@@ -2449,9 +2147,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
+        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f)root;"])
         >>> print(tree.ascii_art())
                             /-a
                   /c-------|
@@ -2465,6 +2162,7 @@ class TreeNode(SkbioObject):
                                        compact=compact)
         return '\n'.join(lines)
 
+    @experimental(as_of="0.4.0")
     def accumulate_to_ancestor(self, ancestor):
         r"""Return the sum of the distance between self and ancestor
 
@@ -2493,9 +2191,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a:1,b:2)c:3,(d:4,e:5)f:6)root;"))
+        >>> tree = TreeNode.read([u"((a:1,b:2)c:3,(d:4,e:5)f:6)root;"])
         >>> root = tree
         >>> tree.find('a').accumulate_to_ancestor(root)
         4.0
@@ -2515,6 +2212,7 @@ class TreeNode(SkbioObject):
 
         return accum
 
+    @experimental(as_of="0.4.0")
     def distance(self, other):
         """Return the distance between self and other
 
@@ -2546,9 +2244,8 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a:1,b:2)c:3,(d:4,e:5)f:6)root;"))
+        >>> tree = TreeNode.read([u"((a:1,b:2)c:3,(d:4,e:5)f:6)root;"])
         >>> tip_a = tree.find('a')
         >>> tip_d = tree.find('d')
         >>> tip_a.distance(tip_d)
@@ -2597,6 +2294,7 @@ class TreeNode(SkbioObject):
         max_pair = (distmtx.ids[idx_max[0]], distmtx.ids[idx_max[1]])
         return distmtx[idx_max], max_pair
 
+    @experimental(as_of="0.4.0")
     def get_max_distance(self):
         """Returns the max tip tip distance between any pair of tips
 
@@ -2621,14 +2319,13 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a:1,b:2)c:3,(d:4,e:5)f:6)root;"))
+        >>> tree = TreeNode.read([u"((a:1,b:2)c:3,(d:4,e:5)f:6)root;"])
         >>> dist, tips = tree.get_max_distance()
         >>> dist
         16.0
         >>> [n.name for n in tips]
-        ['b', 'e']
+        [u'b', u'e']
         """
         if not hasattr(self, 'MaxDistTips'):
             # _set_max_distance will throw a TreeError if a node with a single
@@ -2649,6 +2346,7 @@ class TreeNode(SkbioObject):
                 tips = [tip_a[1], tip_b[1]]
         return longest, tips
 
+    @experimental(as_of="0.4.0")
     def tip_tip_distances(self, endpoints=None):
         """Returns distance matrix between pairs of tips, and a tip order.
 
@@ -2680,14 +2378,13 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a:1,b:2)c:3,(d:4,e:5)f:6)root;"))
+        >>> tree = TreeNode.read([u"((a:1,b:2)c:3,(d:4,e:5)f:6)root;"])
         >>> mat = tree.tip_tip_distances()
         >>> print(mat)
         4x4 distance matrix
         IDs:
-        'a', 'b', 'd', 'e'
+        u'a', u'b', u'd', u'e'
         Data:
         [[  0.   3.  14.  15.]
          [  3.   0.  15.  16.]
@@ -2754,6 +2451,7 @@ class TreeNode(SkbioObject):
 
         return DistanceMatrix(result + result.T, [n.name for n in tip_order])
 
+    @experimental(as_of="0.4.0")
     def compare_rfd(self, other, proportion=False):
         """Calculates the Robinson and Foulds symmetric difference
 
@@ -2791,10 +2489,9 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree1 = TreeNode.read(StringIO("((a,b),(c,d));"))
-        >>> tree2 = TreeNode.read(StringIO("(((a,b),c),d);"))
+        >>> tree1 = TreeNode.read([u"((a,b),(c,d));"])
+        >>> tree2 = TreeNode.read([u"(((a,b),c),d);"])
         >>> tree1.compare_rfd(tree2)
         2.0
 
@@ -2826,6 +2523,7 @@ class TreeNode(SkbioObject):
 
         return dist
 
+    @experimental(as_of="0.4.0")
     def compare_subsets(self, other, exclude_absent_taxa=False):
         """Returns fraction of overlapping subsets where self and other differ.
 
@@ -2852,10 +2550,9 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree1 = TreeNode.read(StringIO("((a,b),(c,d));"))
-        >>> tree2 = TreeNode.read(StringIO("(((a,b),c),d);"))
+        >>> tree1 = TreeNode.read([u"((a,b),(c,d));"])
+        >>> tree2 = TreeNode.read([u"(((a,b),c),d);"])
         >>> tree1.compare_subsets(tree2)
         0.5
 
@@ -2877,6 +2574,7 @@ class TreeNode(SkbioObject):
 
         return 1 - (2 * intersection_length / float(total_subsets))
 
+    @experimental(as_of="0.4.0")
     def compare_tip_distances(self, other, sample=None, dist_f=distance_from_r,
                               shuffle_f=np.random.shuffle):
         """Compares self to other using tip-to-tip distance matrices.
@@ -2925,11 +2623,10 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
         >>> # note, only three common taxa between the trees
-        >>> tree1 = TreeNode.read(StringIO("((a:1,b:1):2,(c:0.5,X:0.7):3);"))
-        >>> tree2 = TreeNode.read(StringIO("(((a:1,b:1,Y:1):2,c:3):1,Z:4);"))
+        >>> tree1 = TreeNode.read([u"((a:1,b:1):2,(c:0.5,X:0.7):3);"])
+        >>> tree2 = TreeNode.read([u"(((a:1,b:1,Y:1):2,c:3):1,Z:4);"])
         >>> dist = tree1.compare_tip_distances(tree2)
         >>> print("%.9f" % dist)
         0.000133446
@@ -2958,6 +2655,7 @@ class TreeNode(SkbioObject):
 
         return dist_f(self_matrix, other_matrix)
 
+    @experimental(as_of="0.4.0")
     def index_tree(self):
         """Index a tree for rapid lookups within a tree array
 
@@ -2998,6 +2696,7 @@ class TreeNode(SkbioObject):
 
         return id_index, child_index
 
+    @experimental(as_of="0.4.0")
     def assign_ids(self):
         """Assign topologically stable unique ids to self
 
@@ -3012,6 +2711,7 @@ class TreeNode(SkbioObject):
 
         self.id = curr_index
 
+    @experimental(as_of="0.4.0")
     def descending_branch_length(self, tip_subset=None):
         """Find total descending branch length from self or subset of self tips
 
@@ -3047,10 +2747,9 @@ class TreeNode(SkbioObject):
 
         Examples
         --------
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tr = TreeNode.read(StringIO("(((A:.1,B:1.2)C:.6,(D:.9,E:.6)F:.9)G"
-        ...                             ":2.4,(H:.4,I:.5)J:1.3)K;"))
+        >>> tr = TreeNode.read([u"(((A:.1,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,"
+        ...                     "(H:.4,I:.5)J:1.3)K;"])
         >>> tdbl = tr.descending_branch_length()
         >>> sdbl = tr.descending_branch_length(['A','E'])
         >>> print(tdbl, sdbl)
@@ -3077,6 +2776,7 @@ class TreeNode(SkbioObject):
             return sum(n.length for n in self.postorder(include_self=True) if
                        n.length is not None)
 
+    @experimental(as_of="0.4.0")
     def cache_attr(self, func, cache_attrname, cache_type=list):
         """Cache attributes on internal nodes of the tree
 
@@ -3109,23 +2809,22 @@ class TreeNode(SkbioObject):
         --------
         Cache the tip names of the tree on its internal nodes
 
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b,(c,d)e)f,(g,h)i)root;"))
+        >>> tree = TreeNode.read([u"((a,b,(c,d)e)f,(g,h)i)root;"])
         >>> f = lambda n: [n.name] if n.is_tip() else []
         >>> tree.cache_attr(f, 'tip_names')
         >>> for n in tree.traverse(include_self=True):
         ...     print("Node name: %s, cache: %r" % (n.name, n.tip_names))
-        Node name: root, cache: ['a', 'b', 'c', 'd', 'g', 'h']
-        Node name: f, cache: ['a', 'b', 'c', 'd']
-        Node name: a, cache: ['a']
-        Node name: b, cache: ['b']
-        Node name: e, cache: ['c', 'd']
-        Node name: c, cache: ['c']
-        Node name: d, cache: ['d']
-        Node name: i, cache: ['g', 'h']
-        Node name: g, cache: ['g']
-        Node name: h, cache: ['h']
+        Node name: root, cache: [u'a', u'b', u'c', u'd', u'g', u'h']
+        Node name: f, cache: [u'a', u'b', u'c', u'd']
+        Node name: a, cache: [u'a']
+        Node name: b, cache: [u'b']
+        Node name: e, cache: [u'c', u'd']
+        Node name: c, cache: [u'c']
+        Node name: d, cache: [u'd']
+        Node name: i, cache: [u'g', u'h']
+        Node name: g, cache: [u'g']
+        Node name: h, cache: [u'h']
 
         """
         if cache_type in [set, frozenset]:
@@ -3146,6 +2845,7 @@ class TreeNode(SkbioObject):
             cached.append(cache_type(func(node)))
             setattr(node, cache_attrname, reduce(reduce_f, cached))
 
+    @experimental(as_of="0.4.0")
     def shuffle(self, k=None, names=None, shuffle_f=np.random.shuffle, n=1):
         """Yield trees with shuffled tip names
 
@@ -3169,10 +2869,10 @@ class TreeNode(SkbioObject):
         Tip names are shuffled inplace. If neither `k` nor `names` are
         provided, all tips are shuffled.
 
-        Returns
-        -------
-        GeneratorType
-            Yielding TreeNode
+        Yields
+        ------
+        TreeNode
+            Tree with shuffled tip names.
 
         Raises
         ------
@@ -3189,9 +2889,8 @@ class TreeNode(SkbioObject):
         Alternate the names on two of the tips, 'a', and 'b', and do this 5
         times.
 
-        >>> from six import StringIO
         >>> from skbio import TreeNode
-        >>> tree = TreeNode.read(StringIO("((a,b),(c,d));"))
+        >>> tree = TreeNode.read([u"((a,b),(c,d));"])
         >>> rev = lambda items: items.reverse()
         >>> shuffler = tree.shuffle(names=['a', 'b'], shuffle_f=rev, n=5)
         >>> for shuffled_tree in shuffler:
@@ -3240,55 +2939,3 @@ class TreeNode(SkbioObject):
 
             yield self
             counter += 1
-
-
-def _dnd_tokenizer(data):
-    r"""Tokenizes data into a stream of punctuation, labels and lengths.
-
-    Parameters
-    ----------
-    data : str
-        a DND-like (e.g., newick) string
-
-    Returns
-    -------
-    GeneratorType
-        Yields successive DND tokens
-
-    See Also
-    --------
-    TreeNode.from_newick
-    TreeNode.to_newick
-
-    Examples
-    --------
-    >>> from skbio.tree._tree import _dnd_tokenizer
-    >>> for token in _dnd_tokenizer("((tip1, tip2)internal1)"):
-    ...     print(token)
-    (
-    (
-    tip1
-    ,
-    tip2
-    )
-    internal1
-    )
-
-    """
-    dnd_tokens = set('(:),;')
-
-    in_quotes = False
-    saved = []
-    sa = saved.append
-    for d in data:
-        if d == "'":
-            in_quotes = not in_quotes
-        if d in dnd_tokens and not in_quotes:
-            curr = ''.join(saved).strip()
-            if curr:
-                yield curr
-            yield d
-            saved = []
-            sa = saved.append
-        else:
-            sa(d)
diff --git a/skbio/tree/_trie.py b/skbio/tree/_trie.py
index 32a020c..a00e9f1 100644
--- a/skbio/tree/_trie.py
+++ b/skbio/tree/_trie.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -11,6 +9,8 @@
 from __future__ import absolute_import, division, print_function
 from future.utils import viewitems
 
+from skbio.util._decorator import deprecated
+
 
 class _CompressedNode(object):
     """Represents a node in the compressed trie
@@ -19,18 +19,9 @@ class _CompressedNode(object):
     ----------
     key : string
         the key attached to the node
-
     values : list of objects, optional
         the values attached to this node
 
-    Attributes
-    ----------
-    values : list of objects
-        the values attached to this node
-    key : string
-        the key attached to the node
-    children : dict of {string: _CompressedNode}
-        the children nodes below this node
     """
 
     def __init__(self, key, values=None):
@@ -39,8 +30,8 @@ class _CompressedNode(object):
         self.children = {}
 
     def __nonzero__(self):
-        return (self.key != "" or len(self.values) > 0
-                or len(self.children.keys()) > 0)
+        return (self.key != "" or len(self.values) > 0 or
+                len(self.children.keys()) > 0)
 
     def __len__(self):
         """Returns the number of values attached to the node
@@ -177,6 +168,13 @@ class _CompressedNode(object):
                 return node.find(key[index:])
         return []
 
+trie_deprecation_p = {
+    'as_of': '0.4.0', 'until': '0.4.1', 'reason': (
+        "scikit-bio's trie functionality will be replaced with "
+        "with functionality from a dedicated package. To track "
+        "progress, see [#937]"
+        "(https://github.com/biocore/scikit-bio/issues/937).")}
+
 
 class CompressedTrie(object):
     """ A compressed Trie for a list of (key, value) pairs
@@ -186,30 +184,31 @@ class CompressedTrie(object):
     pair_list : list of tuples, optional
         List of (key, value) pairs to initialize the Trie
 
-    Attributes
-    ----------
-    size
-    prefix_map
     """
 
+    @deprecated(**trie_deprecation_p)
     def __init__(self, pair_list=None):
         self._root = _CompressedNode("")
         if pair_list:
             for key, value in pair_list:
                 self.insert(key, value)
 
+    @deprecated(**trie_deprecation_p)
     def __nonzero__(self):
         return bool(self._root)
 
+    @deprecated(**trie_deprecation_p)
     def __len__(self):
         return len(self._root)
 
     @property
+    @deprecated(**trie_deprecation_p)
     def size(self):
         """int with the number of nodes in the Trie"""
         return self._root.size
 
     @property
+    @deprecated(**trie_deprecation_p)
     def prefix_map(self):
         """Dict with the prefix map
 
@@ -217,6 +216,7 @@ class CompressedTrie(object):
         """
         return self._root.prefix_map
 
+    @deprecated(**trie_deprecation_p)
     def insert(self, key, value):
         """Inserts key with value in Trie
 
@@ -230,6 +230,7 @@ class CompressedTrie(object):
         """
         self._root.insert(key, value)
 
+    @deprecated(**trie_deprecation_p)
     def find(self, key):
         """Searches for key and returns values stored for the key.
 
@@ -246,19 +247,20 @@ class CompressedTrie(object):
         return self._root.find(key)
 
 
+@deprecated(**trie_deprecation_p)
 def fasta_to_pairlist(seqs):
     """Yields (key, value) pairs, useful for populating a Trie object
 
     Parameters
     ----------
     seqs : Iterable
-        tuples of the form ``(label, seq)``, e.g., as obtained by
-        skbio.parse.sequences.parse_fasta
+        tuples of the form ``(label, seq)``
+
+    Yields
+    ------
+    tuple
+        Tuple of the form ``(seq, label)``.
 
-    Returns
-    -------
-    GeneratorType
-        yields tuples of the form ``(seq, label)``
     """
     for label, seq in seqs:
         yield seq, label
diff --git a/skbio/tree/tests/__init__.py b/skbio/tree/tests/__init__.py
index 0bf0c55..3fe3dc6 100644
--- a/skbio/tree/tests/__init__.py
+++ b/skbio/tree/tests/__init__.py
@@ -5,3 +5,5 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
diff --git a/skbio/tree/tests/test_majority_rule.py b/skbio/tree/tests/test_majority_rule.py
index 4c9bb73..d466c17 100644
--- a/skbio/tree/tests/test_majority_rule.py
+++ b/skbio/tree/tests/test_majority_rule.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -8,11 +6,13 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 from unittest import TestCase, main
 
-from six import StringIO
 import numpy as np
 
+from skbio.io._fileobject import StringIO
 from skbio import TreeNode
 from skbio.tree import majority_rule
 from skbio.tree._majority_rule import (_walk_clades, _filter_clades,
diff --git a/skbio/tree/tests/test_nj.py b/skbio/tree/tests/test_nj.py
index 554d6a7..75bad7c 100644
--- a/skbio/tree/tests/test_nj.py
+++ b/skbio/tree/tests/test_nj.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -8,9 +6,11 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from six import StringIO
+from __future__ import absolute_import, division, print_function
+
 from unittest import TestCase, main
 
+from skbio.io._fileobject import StringIO
 from skbio import DistanceMatrix, TreeNode, nj
 from skbio.tree._nj import (
     _compute_q, _compute_collapsed_dm, _lowest_index, _otu_to_new_node,
diff --git a/skbio/tree/tests/test_tree.py b/skbio/tree/tests/test_tree.py
index f125e77..22a922d 100644
--- a/skbio/tree/tests/test_tree.py
+++ b/skbio/tree/tests/test_tree.py
@@ -8,26 +8,23 @@
 
 from __future__ import absolute_import, division, print_function
 
-import warnings
 from unittest import TestCase, main
 
 import numpy as np
 import numpy.testing as nptest
 from scipy.stats import pearsonr
-from six import StringIO
 
+from skbio.io._fileobject import StringIO
 from skbio import DistanceMatrix, TreeNode
-from skbio.tree._tree import _dnd_tokenizer
 from skbio.tree import (DuplicateNodeError, NoLengthError,
                         TreeError, MissingNodeError, NoParentError)
-from skbio.io import RecordError
 
 
 class TreeTests(TestCase):
 
     def setUp(self):
         """Prep the self"""
-        self.simple_t = TreeNode.from_newick("((a,b)i1,(c,d)i2)root;")
+        self.simple_t = TreeNode.read(StringIO(u"((a,b)i1,(c,d)i2)root;"))
         nodes = dict([(x, TreeNode(x)) for x in 'abcdefgh'])
         nodes['a'].append(nodes['b'])
         nodes['b'].append(nodes['c'])
@@ -49,8 +46,9 @@ class TreeTests(TestCase):
 
         self.rev_f = rev_f
         self.rotate_f = rotate_f
-        self.complex_tree = TreeNode.from_newick("(((a,b)int1,(x,y,(w,z)int2,"
-                                                 "(c,d)int3)int4),(e,f)int5);")
+        self.complex_tree = TreeNode.read(StringIO(u"(((a,b)int1,(x,y,(w,z)int"
+                                                   "2,(c,d)int3)int4),(e,f)int"
+                                                   "5);"))
 
     def test_count(self):
         """Get node counts"""
@@ -77,7 +75,7 @@ class TreeTests(TestCase):
 
     def test_append(self):
         """Append a node to a tree"""
-        second_tree = TreeNode.from_newick("(x,y)z;")
+        second_tree = TreeNode.read(StringIO(u"(x,y)z;"))
         self.simple_t.append(second_tree)
 
         self.assertEqual(self.simple_t.children[0].name, 'i1')
@@ -90,10 +88,19 @@ class TreeTests(TestCase):
 
     def test_extend(self):
         """Extend a few nodes"""
-        second_tree = TreeNode.from_newick("(x1,y1)z1;")
-        third_tree = TreeNode.from_newick("(x2,y2)z2;")
+        second_tree = TreeNode.read(StringIO(u"(x1,y1)z1;"))
+        third_tree = TreeNode.read(StringIO(u"(x2,y2)z2;"))
+        first_tree = TreeNode.read(StringIO(u"(x1,y1)z1;"))
+        fourth_tree = TreeNode.read(StringIO(u"(x2,y2)z2;"))
         self.simple_t.extend([second_tree, third_tree])
 
+        first_tree.extend(fourth_tree.children)
+        self.assertEqual(0, len(fourth_tree.children))
+        self.assertEqual(first_tree.children[0].name, 'x1')
+        self.assertEqual(first_tree.children[1].name, 'y1')
+        self.assertEqual(first_tree.children[2].name, 'x2')
+        self.assertEqual(first_tree.children[3].name, 'y2')
+
         self.assertEqual(self.simple_t.children[0].name, 'i1')
         self.assertEqual(self.simple_t.children[1].name, 'i2')
         self.assertEqual(self.simple_t.children[2].name, 'z1')
@@ -140,8 +147,8 @@ class TreeTests(TestCase):
 
     def test_pop(self):
         """Pop off a node"""
-        second_tree = TreeNode.from_newick("(x1,y1)z1;")
-        third_tree = TreeNode.from_newick("(x2,y2)z2;")
+        second_tree = TreeNode.read(StringIO(u"(x1,y1)z1;"))
+        third_tree = TreeNode.read(StringIO(u"(x2,y2)z2;"))
         self.simple_t.extend([second_tree, third_tree])
 
         i1 = self.simple_t.pop(0)
@@ -294,7 +301,7 @@ class TreeTests(TestCase):
         self.assertEqual(root._non_tip_cache, {})
 
     def test_invalidate_attr_caches(self):
-        tree = TreeNode.from_newick("((a,b,(c,d)e)f,(g,h)i)root;")
+        tree = TreeNode.read(StringIO(u"((a,b,(c,d)e)f,(g,h)i)root;"))
 
         def f(n):
             return [n.name] if n.is_tip() else []
@@ -306,10 +313,10 @@ class TreeTests(TestCase):
 
     def test_create_caches_duplicate_tip_names(self):
         with self.assertRaises(DuplicateNodeError):
-            TreeNode.from_newick('(a, a)').create_caches()
+            TreeNode.read(StringIO(u'(a, a);')).create_caches()
 
     def test_find_all(self):
-        t = TreeNode.from_newick("((a,b)c,((d,e)c)c,(f,(g,h)c)a)root;")
+        t = TreeNode.read(StringIO(u"((a,b)c,((d,e)c)c,(f,(g,h)c)a)root;"))
         exp = [t.children[0],
                t.children[1].children[0],
                t.children[1],
@@ -335,7 +342,7 @@ class TreeTests(TestCase):
 
     def test_find(self):
         """Find a node in a tree"""
-        t = TreeNode.from_newick("((a,b)c,(d,e)f);")
+        t = TreeNode.read(StringIO(u"((a,b)c,(d,e)f);"))
         exp = t.children[0]
         obs = t.find('c')
         self.assertEqual(obs, exp)
@@ -349,7 +356,7 @@ class TreeTests(TestCase):
 
     def test_find_cache_bug(self):
         """First implementation did not force the cache to be at the root"""
-        t = TreeNode.from_newick("((a,b)c,(d,e)f,(g,h)f);")
+        t = TreeNode.read(StringIO(u"((a,b)c,(d,e)f,(g,h)f);"))
         exp_tip_cache_keys = set(['a', 'b', 'd', 'e', 'g', 'h'])
         exp_non_tip_cache_keys = set(['c', 'f'])
         tip_a = t.children[0].children[0]
@@ -361,8 +368,8 @@ class TreeTests(TestCase):
 
     def test_find_by_id(self):
         """Find a node by id"""
-        t1 = TreeNode.from_newick("((,),(,,));")
-        t2 = TreeNode.from_newick("((,),(,,));")
+        t1 = TreeNode.read(StringIO(u"((,),(,,));"))
+        t2 = TreeNode.read(StringIO(u"((,),(,,));"))
 
         exp = t1.children[1]
         obs = t1.find_by_id(6)  # right inner node with 3 children
@@ -377,7 +384,7 @@ class TreeTests(TestCase):
 
     def test_find_by_func(self):
         """Find nodes by a function"""
-        t = TreeNode.from_newick("((a,b)c,(d,e)f);")
+        t = TreeNode.read(StringIO(u"((a,b)c,(d,e)f);"))
 
         def func(x):
             return x.parent == t.find('c')
@@ -423,7 +430,7 @@ class TreeTests(TestCase):
     def test_ascii_art(self):
         """Make some ascii trees"""
         # unlabeled internal node
-        tr = TreeNode.from_newick("(B:0.2,(C:0.3,D:0.4):0.6)F;")
+        tr = TreeNode.read(StringIO(u"(B:0.2,(C:0.3,D:0.4):0.6)F;"))
         obs = tr.ascii_art(show_internal=True, compact=False)
         exp = "          /-B\n-F-------|\n         |          /-C\n         "\
               " \\--------|\n                    \\-D"
@@ -437,12 +444,13 @@ class TreeTests(TestCase):
         self.assertEqual(obs, exp)
 
     def test_ascii_art_three_children(self):
-        obs = TreeNode.from_newick('(a,(b,c,d));').ascii_art()
+        obs = TreeNode.read(StringIO(u'(a,(b,c,d));')).ascii_art()
         self.assertEqual(obs, exp_ascii_art_three_children)
 
     def test_accumulate_to_ancestor(self):
         """Get the distance from a node to its ancestor"""
-        t = TreeNode.from_newick("((a:0.1,b:0.2)c:0.3,(d:0.4,e)f:0.5)root;")
+        t = TreeNode.read(StringIO(
+            u"((a:0.1,b:0.2)c:0.3,(d:0.4,e)f:0.5)root;"))
         a = t.find('a')
         b = t.find('b')
         exp_to_root = 0.1 + 0.3
@@ -454,7 +462,8 @@ class TreeTests(TestCase):
 
     def test_distance(self):
         """Get the distance between two nodes"""
-        t = TreeNode.from_newick("((a:0.1,b:0.2)c:0.3,(d:0.4,e)f:0.5)root;")
+        t = TreeNode.read(StringIO(
+            u"((a:0.1,b:0.2)c:0.3,(d:0.4,e)f:0.5)root;"))
         tips = sorted([n for n in t.tips()], key=lambda x: x.name)
 
         nptest.assert_almost_equal(tips[0].distance(tips[0]), 0.0)
@@ -477,7 +486,7 @@ class TreeTests(TestCase):
 
     def test_lowest_common_ancestor(self):
         """TreeNode lowestCommonAncestor should return LCA for set of tips"""
-        t1 = TreeNode.from_newick("((a,(b,c)d)e,f,(g,h)i)j;")
+        t1 = TreeNode.read(StringIO(u"((a,(b,c)d)e,f,(g,h)i)j;"))
         t2 = t1.copy()
         t3 = t1.copy()
         t4 = t1.copy()
@@ -513,16 +522,16 @@ class TreeTests(TestCase):
 
     def test_get_max_distance(self):
         """get_max_distance should get max tip distance across tree"""
-        tree = TreeNode.from_newick(
-            "((a:0.1,b:0.2)c:0.3,(d:0.4,e:0.5)f:0.6)root;")
+        tree = TreeNode.read(StringIO(
+            u"((a:0.1,b:0.2)c:0.3,(d:0.4,e:0.5)f:0.6)root;"))
         dist, nodes = tree.get_max_distance()
         nptest.assert_almost_equal(dist, 1.6)
         self.assertEqual(sorted([n.name for n in nodes]), ['b', 'e'])
 
     def test_set_max_distance(self):
         """set_max_distance sets MaxDistTips across tree"""
-        tree = TreeNode.from_newick(
-            "((a:0.1,b:0.2)c:0.3,(d:0.4,e:0.5)f:0.6)root;")
+        tree = TreeNode.read(StringIO(
+            u"((a:0.1,b:0.2)c:0.3,(d:0.4,e:0.5)f:0.6)root;"))
         tree._set_max_distance()
         tip_a, tip_b = tree.MaxDistTips
         self.assertEqual(tip_a[0] + tip_b[0], 1.6)
@@ -530,14 +539,14 @@ class TreeTests(TestCase):
 
     def test_shear(self):
         """Shear the nodes"""
-        t = TreeNode.from_newick('((H:1,G:1):2,(R:0.5,M:0.7):3);')
+        t = TreeNode.read(StringIO(u'((H:1,G:1):2,(R:0.5,M:0.7):3);'))
         obs = str(t.shear(['G', 'M']))
         exp = '(G:3.0,M:3.7);\n'
         self.assertEqual(obs, exp)
 
     def test_compare_tip_distances(self):
-        t = TreeNode.from_newick('((H:1,G:1):2,(R:0.5,M:0.7):3);')
-        t2 = TreeNode.from_newick('(((H:1,G:1,O:1):2,R:3):1,X:4);')
+        t = TreeNode.read(StringIO(u'((H:1,G:1):2,(R:0.5,M:0.7):3);'))
+        t2 = TreeNode.read(StringIO(u'(((H:1,G:1,O:1):2,R:3):1,X:4);'))
         obs = t.compare_tip_distances(t2)
         # note: common taxa are H, G, R (only)
         m1 = np.array([[0, 2, 6.5], [2, 0, 6.5], [6.5, 6.5, 0]])
@@ -546,8 +555,8 @@ class TreeTests(TestCase):
         self.assertAlmostEqual(obs, (1 - r) / 2)
 
     def test_compare_tip_distances_sample(self):
-        t = TreeNode.from_newick('((H:1,G:1):2,(R:0.5,M:0.7):3);')
-        t2 = TreeNode.from_newick('(((H:1,G:1,O:1):2,R:3):1,X:4);')
+        t = TreeNode.read(StringIO(u'((H:1,G:1):2,(R:0.5,M:0.7):3);'))
+        t2 = TreeNode.read(StringIO(u'(((H:1,G:1,O:1):2,R:3):1,X:4);'))
         obs = t.compare_tip_distances(t2, sample=3, shuffle_f=sorted)
         # note: common taxa are H, G, R (only)
         m1 = np.array([[0, 2, 6.5], [2, 0, 6.5], [6.5, 6.5, 0]])
@@ -556,29 +565,29 @@ class TreeTests(TestCase):
         self.assertAlmostEqual(obs, (1 - r) / 2)
 
         # 4 common taxa, still picking H, G, R
-        s = '((H:1,G:1):2,(R:0.5,M:0.7,Q:5):3);'
-        t = TreeNode.from_newick(s, TreeNode)
-        s3 = '(((H:1,G:1,O:1):2,R:3,Q:10):1,X:4);'
-        t3 = TreeNode.from_newick(s3, TreeNode)
+        s = u'((H:1,G:1):2,(R:0.5,M:0.7,Q:5):3);'
+        t = TreeNode.read(StringIO(s))
+        s3 = u'(((H:1,G:1,O:1):2,R:3,Q:10):1,X:4);'
+        t3 = TreeNode.read(StringIO(s3))
         obs = t.compare_tip_distances(t3, sample=3, shuffle_f=sorted)
 
     def test_compare_tip_distances_no_common_tips(self):
-        t = TreeNode.from_newick('((H:1,G:1):2,(R:0.5,M:0.7):3);')
-        t2 = TreeNode.from_newick('(((Z:1,Y:1,X:1):2,W:3):1,V:4);')
+        t = TreeNode.read(StringIO(u'((H:1,G:1):2,(R:0.5,M:0.7):3);'))
+        t2 = TreeNode.read(StringIO(u'(((Z:1,Y:1,X:1):2,W:3):1,V:4);'))
 
         with self.assertRaises(ValueError):
             t.compare_tip_distances(t2)
 
     def test_compare_tip_distances_single_common_tip(self):
-        t = TreeNode.from_newick('((H:1,G:1):2,(R:0.5,M:0.7):3);')
-        t2 = TreeNode.from_newick('(((R:1,Y:1,X:1):2,W:3):1,V:4);')
+        t = TreeNode.read(StringIO(u'((H:1,G:1):2,(R:0.5,M:0.7):3);'))
+        t2 = TreeNode.read(StringIO(u'(((R:1,Y:1,X:1):2,W:3):1,V:4);'))
 
         self.assertEqual(t.compare_tip_distances(t2), 1)
         self.assertEqual(t2.compare_tip_distances(t), 1)
 
     def test_tip_tip_distances_endpoints(self):
         """Test getting specifc tip distances  with tipToTipDistances"""
-        t = TreeNode.from_newick('((H:1,G:1):2,(R:0.5,M:0.7):3);')
+        t = TreeNode.read(StringIO(u'((H:1,G:1):2,(R:0.5,M:0.7):3);'))
         nodes = [t.find('H'), t.find('G'), t.find('M')]
         names = ['H', 'G', 'M']
         exp = DistanceMatrix(np.array([[0, 2.0, 6.7],
@@ -592,18 +601,18 @@ class TreeTests(TestCase):
         self.assertEqual(obs, exp)
 
     def test_tip_tip_distances_non_tip_endpoints(self):
-        t = TreeNode.from_newick('((H:1,G:1)foo:2,(R:0.5,M:0.7):3);')
+        t = TreeNode.read(StringIO(u'((H:1,G:1)foo:2,(R:0.5,M:0.7):3);'))
         with self.assertRaises(ValueError):
             t.tip_tip_distances(endpoints=['foo'])
 
     def test_tip_tip_distances_no_length(self):
-        t = TreeNode.from_newick("((a,b)c,(d,e)f);")
+        t = TreeNode.read(StringIO(u"((a,b)c,(d,e)f);"))
         with self.assertRaises(NoLengthError):
             t.tip_tip_distances()
 
     def test_neighbors(self):
         """Get neighbors of a node"""
-        t = TreeNode.from_newick("((a,b)c,(d,e)f);")
+        t = TreeNode.read(StringIO(u"((a,b)c,(d,e)f);"))
         exp = t.children
         obs = t.neighbors()
         self.assertEqual(obs, exp)
@@ -622,7 +631,7 @@ class TreeTests(TestCase):
 
     def test_has_children(self):
         """Test if has children"""
-        t = TreeNode.from_newick("((a,b)c,(d,e)f);")
+        t = TreeNode.read(StringIO(u"((a,b)c,(d,e)f);"))
         self.assertTrue(t.has_children())
         self.assertTrue(t.children[0].has_children())
         self.assertTrue(t.children[1].has_children())
@@ -668,9 +677,9 @@ class TreeTests(TestCase):
     def test_index_tree(self):
         """index_tree should produce correct index and node map"""
         # test for first tree: contains singleton outgroup
-        t1 = TreeNode.from_newick('(((a,b),c),(d,e))')
-        t2 = TreeNode.from_newick('(((a,b),(c,d)),(e,f))')
-        t3 = TreeNode.from_newick('(((a,b,c),(d)),(e,f))')
+        t1 = TreeNode.read(StringIO(u'(((a,b),c),(d,e));'))
+        t2 = TreeNode.read(StringIO(u'(((a,b),(c,d)),(e,f));'))
+        t3 = TreeNode.read(StringIO(u'(((a,b,c),(d)),(e,f));'))
 
         id_1, child_1 = t1.index_tree()
         nodes_1 = [n.id for n in t1.traverse(self_before=False,
@@ -696,7 +705,7 @@ class TreeTests(TestCase):
 
     def test_root_at(self):
         """Form a new root"""
-        t = TreeNode.from_newick("(((a,b)c,(d,e)f)g,h)i;")
+        t = TreeNode.read(StringIO(u"(((a,b)c,(d,e)f)g,h)i;"))
         with self.assertRaises(TreeError):
             t.root_at(t.find('h'))
 
@@ -720,16 +729,16 @@ class TreeTests(TestCase):
 
     def test_root_at_midpoint_no_lengths(self):
         # should get same tree back (a copy)
-        nwk = '(a,b)c;\n'
-        t = TreeNode.from_newick(nwk)
+        nwk = u'(a,b)c;\n'
+        t = TreeNode.read(StringIO(nwk))
         obs = t.root_at_midpoint()
         self.assertEqual(str(obs), nwk)
 
     def test_compare_subsets(self):
         """compare_subsets should return the fraction of shared subsets"""
-        t = TreeNode.from_newick('((H,G),(R,M));')
-        t2 = TreeNode.from_newick('(((H,G),R),M);')
-        t4 = TreeNode.from_newick('(((H,G),(O,R)),X);')
+        t = TreeNode.read(StringIO(u'((H,G),(R,M));'))
+        t2 = TreeNode.read(StringIO(u'(((H,G),R),M);'))
+        t4 = TreeNode.read(StringIO(u'(((H,G),(O,R)),X);'))
 
         result = t.compare_subsets(t)
         self.assertEqual(result, 0)
@@ -754,9 +763,9 @@ class TreeTests(TestCase):
 
     def test_compare_rfd(self):
         """compare_rfd should return the Robinson Foulds distance"""
-        t = TreeNode.from_newick('((H,G),(R,M));')
-        t2 = TreeNode.from_newick('(((H,G),R),M);')
-        t4 = TreeNode.from_newick('(((H,G),(O,R)),X);')
+        t = TreeNode.read(StringIO(u'((H,G),(R,M));'))
+        t2 = TreeNode.read(StringIO(u'(((H,G),R),M);'))
+        t4 = TreeNode.read(StringIO(u'(((H,G),(O,R)),X);'))
 
         obs = t.compare_rfd(t2)
         exp = 2.0
@@ -773,9 +782,9 @@ class TreeTests(TestCase):
 
     def test_assign_ids(self):
         """Assign IDs to the tree"""
-        t1 = TreeNode.from_newick("(((a,b),c),(e,f),(g));")
-        t2 = TreeNode.from_newick("(((a,b),c),(e,f),(g));")
-        t3 = TreeNode.from_newick("((g),(e,f),(c,(a,b)));")
+        t1 = TreeNode.read(StringIO(u"(((a,b),c),(e,f),(g));"))
+        t2 = TreeNode.read(StringIO(u"(((a,b),c),(e,f),(g));"))
+        t3 = TreeNode.read(StringIO(u"((g),(e,f),(c,(a,b)));"))
         t1_copy = t1.copy()
 
         t1.assign_ids()
@@ -792,9 +801,9 @@ class TreeTests(TestCase):
 
     def test_assign_ids_index_tree(self):
         """assign_ids and index_tree should assign the same IDs"""
-        t1 = TreeNode.from_newick('(((a,b),c),(d,e))')
-        t2 = TreeNode.from_newick('(((a,b),(c,d)),(e,f))')
-        t3 = TreeNode.from_newick('(((a,b,c),(d)),(e,f))')
+        t1 = TreeNode.read(StringIO(u'(((a,b),c),(d,e));'))
+        t2 = TreeNode.read(StringIO(u'(((a,b),(c,d)),(e,f));'))
+        t3 = TreeNode.read(StringIO(u'(((a,b,c),(d)),(e,f));'))
         t1_copy = t1.copy()
         t2_copy = t2.copy()
         t3_copy = t3.copy()
@@ -815,7 +824,7 @@ class TreeTests(TestCase):
 
     def test_unrooted_deepcopy(self):
         """Do an unrooted_copy"""
-        t = TreeNode.from_newick("((a,(b,c)d)e,(f,g)h)i;")
+        t = TreeNode.read(StringIO(u"((a,(b,c)d)e,(f,g)h)i;"))
         exp = "(b,c,(a,((f,g)h)e)d)root;\n"
         obs = t.find('d').unrooted_deepcopy()
         self.assertEqual(str(obs), exp)
@@ -827,8 +836,8 @@ class TreeTests(TestCase):
 
     def test_descending_branch_length(self):
         """Calculate descending branch_length"""
-        tr = TreeNode.from_newick("(((A:.1,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H"
-                                  ":.4,I:.5)J:1.3)K;")
+        tr = TreeNode.read(StringIO(u"(((A:.1,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4"
+                                    ",(H:.4,I:.5)J:1.3)K;"))
         tdbl = tr.descending_branch_length()
         sdbl = tr.descending_branch_length(['A', 'E'])
         nptest.assert_almost_equal(tdbl, 8.9)
@@ -837,36 +846,36 @@ class TreeTests(TestCase):
                           ['A', 'DNE'])
         self.assertRaises(ValueError, tr.descending_branch_length, ['A', 'C'])
 
-        tr = TreeNode.from_newick("(((A,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H:.4"
-                                  ",I:.5)J:1.3)K;")
+        tr = TreeNode.read(StringIO(u"(((A,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H"
+                                    ":.4,I:.5)J:1.3)K;"))
         tdbl = tr.descending_branch_length()
         nptest.assert_almost_equal(tdbl, 8.8)
 
-        tr = TreeNode.from_newick("(((A,B:1.2)C:.6,(D:.9,E:.6)F)G:2.4,(H:.4,I:"
-                                  ".5)J:1.3)K;")
+        tr = TreeNode.read(StringIO(u"(((A,B:1.2)C:.6,(D:.9,E:.6)F)G:2.4,(H:.4"
+                                    ",I:.5)J:1.3)K;"))
         tdbl = tr.descending_branch_length()
         nptest.assert_almost_equal(tdbl, 7.9)
 
-        tr = TreeNode.from_newick("(((A,B:1.2)C:.6,(D:.9,E:.6)F)G:2.4,(H:.4,I:"
-                                  ".5)J:1.3)K;")
+        tr = TreeNode.read(StringIO(u"(((A,B:1.2)C:.6,(D:.9,E:.6)F)G:2.4,(H:.4"
+                                    ",I:.5)J:1.3)K;"))
         tdbl = tr.descending_branch_length(['A', 'D', 'E'])
         nptest.assert_almost_equal(tdbl, 2.1)
 
-        tr = TreeNode.from_newick("(((A,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H:."
-                                  "4,I:.5)J:1.3)K;")
+        tr = TreeNode.read(StringIO(u"(((A,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H"
+                                    ":.4,I:.5)J:1.3)K;"))
         tdbl = tr.descending_branch_length(['I', 'D', 'E'])
         nptest.assert_almost_equal(tdbl, 6.6)
 
         # test with a situation where we have unnamed internal nodes
-        tr = TreeNode.from_newick("(((A,B:1.2):.6,(D:.9,E:.6)F):2.4,(H:.4,I:"
-                                  ".5)J:1.3);")
+        tr = TreeNode.read(StringIO(u"(((A,B:1.2):.6,(D:.9,E:.6)F):2.4,(H:.4,I"
+                                    ":.5)J:1.3);"))
         tdbl = tr.descending_branch_length()
         nptest.assert_almost_equal(tdbl, 7.9)
 
     def test_to_array(self):
         """Convert a tree to arrays"""
-        t = TreeNode.from_newick(
-            '(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7,(e:8,f:9)z:10)')
+        t = TreeNode.read(StringIO(
+            u'(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7,(e:8,f:9)z:10);'))
         id_index, child_index = t.index_tree()
         arrayed = t.to_array()
 
@@ -887,8 +896,8 @@ class TreeTests(TestCase):
         nptest.assert_equal(obs, exp)
 
     def test_to_array_attrs(self):
-        t = TreeNode.from_newick(
-            '(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7,(e:8,f:9)z:10)')
+        t = TreeNode.read(StringIO(
+            u'(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7,(e:8,f:9)z:10);'))
         id_index, child_index = t.index_tree()
         arrayed = t.to_array(attrs=[('name', object)])
 
@@ -914,8 +923,8 @@ class TreeTests(TestCase):
                           '3': ['h', 'i', 'j', 'k', 'l', 'm', 'n'],
                           '4': ['h', 'i', 'j', 'k', 'l', 'm', 'q'],
                           '5': ['h', 'i', 'j', 'k', 'l', 'm', 'n']}
-        exp = TreeNode.from_newick("((((((((1)g)f)e)d,((((2)y)x)))c)b)a,"
-                                   "(((((((3,5)n,(4)q)m)l)k)j)i)h);")
+        exp = TreeNode.read(StringIO(u"((((((((1)g)f)e)d,((((2)y)x)))c)b)a,"
+                                     "(((((((3,5)n,(4)q)m)l)k)j)i)h);"))
 
         root = TreeNode.from_taxonomy(input_lineages.items())
 
@@ -950,14 +959,6 @@ class TreeTests(TestCase):
         obs = [(n.name, lin) for n, lin in tree.to_taxonomy(filter_f=f)]
         self.assertEqual(sorted(obs), exp)
 
-    def test_from_file(self):
-        """Parse a tree from a file"""
-        with warnings.catch_warnings(record=True):
-            warnings.simplefilter("ignore")
-            t_io = StringIO("((a,b)c,(d,e)f)g;")
-            t = TreeNode.from_file(t_io)
-            self.assertEqual(list('abcdefg'), [n.name for n in t.postorder()])
-
     def test_linkage_matrix(self):
         # Ensure matches: http://www.southampton.ac.uk/~re1u06/teaching/upgma/
         id_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
@@ -973,50 +974,6 @@ class TreeTests(TestCase):
                          "F:0.5):5.75):2.0):6.25):2.5);\n",
                          str(tree))
 
-    def test_from_newick_empty(self):
-        obs = TreeNode.from_newick('')
-        self.assertTrue(obs.name is None)
-        self.assertTrue(obs.length is None)
-        self.assertTrue(obs.parent is None)
-        self.assertEqual(obs.children, [])
-        self.assertTrue(obs.id is None)
-
-    def test_from_newick_embedded_semicolon(self):
-        with self.assertRaises(RecordError):
-            TreeNode.from_newick('(a,(c,;b))')
-
-    def test_to_newick_single_node(self):
-        # single node, no name, with semicolon
-        with warnings.catch_warnings(record=True):
-            warnings.simplefilter("ignore")
-            obs = TreeNode().to_newick()
-            self.assertEqual(obs, ';')
-
-            # single node, no name, without semicolon
-            obs = TreeNode().to_newick(semicolon=False)
-            self.assertEqual(obs, '')
-
-            # single node, with name, with semicolon
-            obs = TreeNode(name='brofist').to_newick()
-            self.assertEqual(obs, 'brofist;')
-
-            # single node, with name, without semicolon
-            obs = TreeNode(name='brofist').to_newick(semicolon=False)
-            self.assertEqual(obs, 'brofist')
-
-    def test_to_newick_multi_node(self):
-        with warnings.catch_warnings(record=True):
-            warnings.simplefilter("ignore")
-            t = TreeNode.from_newick(double)
-
-            # with semicolon
-            obs = t.to_newick()
-            self.assertEqual(obs, '(abc,def);')
-
-            # without semicolon
-            obs = t.to_newick(semicolon=False)
-            self.assertEqual(obs, '(abc,def)')
-
     def test_shuffle_invalid_iter(self):
         shuffler = self.simple_t.shuffle(n=-1)
         with self.assertRaises(ValueError):
@@ -1075,222 +1032,6 @@ class TreeTests(TestCase):
             next(self.simple_t.shuffle(names=['x', 'y']))
 
 
-class DndTokenizerTests(TestCase):
-
-    """Tests of the DndTokenizer factory function."""
-
-    def test_gdata(self):
-        """DndTokenizer should work as expected on real data"""
-        exp = \
-            ['(', '(', 'xyz', ':', '0.28124', ',', '(', 'def', ':', '0.24498',
-             ',', 'mno', ':', '0.03627', ')', ':', '0.17710', ')', ':',
-             '0.04870', ',', 'abc', ':', '0.05925', ',', '(', 'ghi', ':',
-             '0.06914', ',', 'jkl', ':', '0.13776', ')', ':', '0.09853', ')',
-             ';']
-        # split it up for debugging on an item-by-item basis
-        obs = list(_dnd_tokenizer(sample))
-        self.assertEqual(len(obs), len(exp))
-        for i, j in zip(obs, exp):
-            self.assertEqual(i, j)
-        # try it all in one go
-        self.assertEqual(list(_dnd_tokenizer(sample)), exp)
-
-    def test_nonames(self):
-        """DndTokenizer should work as expected on trees with no names"""
-        exp = ['(', '(', ',', ')', ',', '(', ',', ')', ')', ';']
-        obs = list(_dnd_tokenizer(no_names))
-        self.assertEqual(obs, exp)
-
-    def test_missing_tip_name(self):
-        """DndTokenizer should work as expected on trees with a missing name"""
-        exp = ['(', '(', 'a', ',', 'b', ')', ',', '(', 'c', ',', ')', ')', ';']
-        obs = list(_dnd_tokenizer(missing_tip_name))
-        self.assertEqual(obs, exp)
-
-    def test_minimal(self):
-        """DndTokenizer should work as expected a minimal tree without names"""
-        exp = ['(', ')', ';']
-        obs = list(_dnd_tokenizer(minimal))
-        self.assertEqual(obs, exp)
-
-
-class DndParserTests(TestCase):
-
-    """Tests of the DndParser factory function."""
-
-    def test_nonames(self):
-        """DndParser should produce the correct tree when there are no names"""
-        obs = TreeNode.from_newick(no_names)
-        exp = TreeNode()
-        exp.append(TreeNode())
-        exp.append(TreeNode())
-        exp.children[0].append(TreeNode())
-        exp.children[0].append(TreeNode())
-        exp.children[1].append(TreeNode())
-        exp.children[1].append(TreeNode())
-        self.assertEqual(str(obs), str(exp))
-
-    def test_minimal(self):
-        """DndParser should produce the correct minimal tree"""
-        obs = TreeNode.from_newick(minimal)
-        exp = TreeNode()
-        exp.append(TreeNode())
-        self.assertEqual(str(obs), str(exp))
-
-    def test_missing_tip_name(self):
-        """DndParser should produce the correct tree when missing a name"""
-        obs = TreeNode.from_newick(missing_tip_name)
-        exp = TreeNode()
-        exp.append(TreeNode())
-        exp.append(TreeNode())
-        exp.children[0].append(TreeNode(name='a'))
-        exp.children[0].append(TreeNode(name='b'))
-        exp.children[1].append(TreeNode(name='c'))
-        exp.children[1].append(TreeNode())
-        self.assertEqual(str(obs), str(exp))
-
-    def test_gsingle(self):
-        """DndParser should produce a single-child TreeNode on minimal data"""
-        t = TreeNode.from_newick(single)
-        self.assertEqual(len(t), 1)
-        child = t[0]
-        self.assertEqual(child.name, 'abc')
-        self.assertEqual(child.length, 3)
-        self.assertEqual(str(t), '(abc:3.0);\n')
-
-    def test_gdouble(self):
-        """DndParser should produce a double-child TreeNode from data"""
-        t = TreeNode.from_newick(double)
-        self.assertEqual(len(t), 2)
-        self.assertEqual(str(t), '(abc:3.0,def:4.0);\n')
-
-    def test_gonenest(self):
-        """DndParser should work correctly with nested data"""
-        t = TreeNode.from_newick(onenest)
-        self.assertEqual(len(t), 2)
-        self.assertEqual(len(t[0]), 0)  # first child is terminal
-        self.assertEqual(len(t[1]), 2)  # second child has two children
-        self.assertEqual(str(t), '(abc:3.0,(def:4.0,ghi:5.0):6.0);\n')
-
-    def test_gnodedata(self):
-        """DndParser should assign name to internal nodes correctly"""
-        t = TreeNode.from_newick(nodedata)
-        self.assertEqual(len(t), 2)
-        self.assertEqual(len(t[0]), 0)  # first child is terminal
-        self.assertEqual(len(t[1]), 2)  # second child has two children
-        self.assertEqual(str(t), '(abc:3.0,(def:4.0,ghi:5.0)jkl:6.0);\n')
-        info_dict = {}
-        for node in t.traverse():
-            info_dict[node.name] = node.length
-        self.assertEqual(info_dict['abc'], 3.0)
-        self.assertEqual(info_dict['def'], 4.0)
-        self.assertEqual(info_dict['ghi'], 5.0)
-        self.assertEqual(info_dict['jkl'], 6.0)
-
-    def test_data(self):
-        """DndParser should work as expected on real data"""
-        t = TreeNode.from_newick(sample)
-        self.assertEqual(
-            str(t), '((xyz:0.28124,(def:0.24498,mno:0.03627):0.1771):0.0487,'
-                    'abc:0.05925,(ghi:0.06914,jkl:0.13776):0.09853);\n')
-        tdata = TreeNode.from_newick(node_data_sample, unescape_name=True)
-        self.assertEqual(
-            str(tdata), "((xyz:0.28124,(def:0.24498,mno:0.03627)A:0.1771)"
-                        "B:0.0487,abc:0.05925,(ghi:0.06914,jkl:0.13776)"
-                        "C:0.09853);\n")
-
-    def test_gbad(self):
-        """DndParser should fail if parens unbalanced"""
-        left = '((abc:3)'
-        right = '(abc:3))'
-        self.assertRaises(RecordError, TreeNode.from_newick, left)
-        self.assertRaises(RecordError, TreeNode.from_newick, right)
-
-    def test_DndParser(self):
-        """DndParser tests"""
-        with warnings.catch_warnings(record=True):
-            warnings.simplefilter("ignore")
-            t_str = "(A_a,(B:1.0,C),'D_e':0.5)E;"
-            tree_unesc = TreeNode.from_newick(t_str, unescape_name=True)
-            tree_esc = TreeNode.from_newick(t_str, unescape_name=False)
-
-            self.assertEqual(tree_unesc.name, 'E')
-            self.assertEqual(tree_unesc.children[0].name, 'A a')
-            self.assertEqual(tree_unesc.children[1].children[0].name, 'B')
-            self.assertEqual(tree_unesc.children[1].children[0].length, 1.0)
-            self.assertEqual(tree_unesc.children[1].children[1].name, 'C')
-            self.assertEqual(tree_unesc.children[2].name, 'D_e')
-            self.assertEqual(tree_unesc.children[2].length, 0.5)
-
-            self.assertEqual(tree_esc.name, 'E')
-            self.assertEqual(tree_esc.children[0].name, 'A_a')
-            self.assertEqual(tree_esc.children[1].children[0].name, 'B')
-            self.assertEqual(tree_esc.children[1].children[0].length, 1.0)
-            self.assertEqual(tree_esc.children[1].children[1].name, 'C')
-            self.assertEqual(tree_esc.children[2].name, "'D_e'")
-            self.assertEqual(tree_esc.children[2].length, 0.5)
-
-            reload_test = tree_esc.to_newick(with_distances=True,
-                                             escape_name=False)
-            obs = TreeNode.from_newick(reload_test, unescape_name=False)
-            self.assertEqual(obs.to_newick(with_distances=True),
-                             tree_esc.to_newick(with_distances=True))
-            reload_test = tree_unesc.to_newick(with_distances=True,
-                                               escape_name=False)
-            obs = TreeNode.from_newick(reload_test, unescape_name=False)
-            self.assertEqual(obs.to_newick(with_distances=True),
-                             tree_unesc.to_newick(with_distances=True))
-
-    def test_DndParser_list(self):
-        """Make sure TreeNode.from_newick can handle list of strings"""
-        with warnings.catch_warnings(record=True):
-            warnings.simplefilter("ignore")
-
-            t_str = ["(A_a,(B:1.0,C)", ",'D_e':0.5)E;"]
-            tree_unesc = TreeNode.from_newick(t_str, unescape_name=True)
-
-            self.assertEqual(tree_unesc.name, 'E')
-            self.assertEqual(tree_unesc.children[0].name, 'A a')
-            self.assertEqual(tree_unesc.children[1].children[0].name, 'B')
-            self.assertEqual(tree_unesc.children[1].children[0].length, 1.0)
-            self.assertEqual(tree_unesc.children[1].children[1].name, 'C')
-            self.assertEqual(tree_unesc.children[2].name, 'D_e')
-            self.assertEqual(tree_unesc.children[2].length, 0.5)
-
-    def test_cache_attr_tip_list(self):
-        tree = TreeNode.read(StringIO("((a,b,(c,d)e)f,(g,h)i)root;"))
-
-        def f(n):
-            return [n.name] if n.is_tip() else []
-
-        tree.cache_attr(f, 'tip_names')
-        self.assertEqual(tree.tip_names, ['a', 'b', 'c', 'd', 'g', 'h'])
-        self.assertEqual(tree.children[0].tip_names, ['a', 'b', 'c', 'd'])
-        self.assertEqual(tree.children[1].tip_names, ['g', 'h'])
-        self.assertEqual(tree.children[0].children[2].tip_names, ['c', 'd'])
-
-    def test_cache_attr_nontip_set(self):
-        tree = TreeNode.read(StringIO("((a,b,(c,d)e)f,(g,h)i)root;"))
-
-        def f(n):
-            return [n.name] if not n.is_tip() else []
-
-        tree.cache_attr(f, 'nontip_names')
-        self.assertEqual(tree.nontip_names, ['e', 'f', 'i', 'root'])
-        self.assertEqual(tree.children[0].nontip_names, ['e', 'f'])
-        self.assertEqual(tree.children[1].nontip_names, ['i'])
-        self.assertEqual(tree.children[0].children[2].nontip_names, ['e'])
-
-    def test_cache_attr_bad_type(self):
-        tree = TreeNode.read(StringIO("((a,b,(c,d)e)f,(g,h)i)root;"))
-
-        def f(n):
-            return [n.name] if not n.is_tip() else []
-
-        with self.assertRaises(TypeError):
-            tree.cache_attr(f, 'nontip_names', TreeNode)
-
-
 sample = """
 (
 (
diff --git a/skbio/util/__init__.py b/skbio/util/__init__.py
index 86c2699..68eb6d8 100644
--- a/skbio/util/__init__.py
+++ b/skbio/util/__init__.py
@@ -17,6 +17,8 @@ Common functionality to support testing in skbio.
    :toctree: generated/
 
    get_data_path
+   TestRunner
+   assert_data_frame_almost_equal
 
 Miscellaneous functionality
 ---------------------------
@@ -58,19 +60,20 @@ Warnings
 # Distributed under the terms of the Modified BSD License.
 #
 # The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
 
-from numpy.testing import Tester
+from __future__ import absolute_import, division, print_function
 
 from ._warning import EfficiencyWarning
 from ._exception import TestingUtilError
 from ._misc import (cardinal_to_ordinal, create_dir, find_duplicates, flatten,
                     is_casava_v180_or_later, remove_files, safe_md5)
-from ._testing import get_data_path
+from ._testing import (get_data_path, TestRunner,
+                       assert_data_frame_almost_equal)
 
-__all__ = ['EfficiencyWarning', 'TestingUtilError',
-           'cardinal_to_ordinal', 'create_dir', 'find_duplicates', 'flatten',
+__all__ = ['EfficiencyWarning', 'TestingUtilError', 'cardinal_to_ordinal',
+           'create_dir', 'find_duplicates', 'flatten',
            'is_casava_v180_or_later', 'remove_files', 'safe_md5',
-           'get_data_path']
+           'get_data_path', 'TestRunner', 'assert_data_frame_almost_equal']
 
-test = Tester().test
+test = TestRunner(__file__).test
diff --git a/skbio/util/_decorator.py b/skbio/util/_decorator.py
new file mode 100644
index 0000000..258dedd
--- /dev/null
+++ b/skbio/util/_decorator.py
@@ -0,0 +1,338 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+import warnings
+import textwrap
+
+import decorator
+
+from ._exception import OverrideError
+
+
+class _state_decorator(object):
+    """ Base class for decorators of all public functionality.
+    """
+
+    _required_kwargs = ()
+
+    def _get_indentation_level(self, docstring_lines,
+                               default_existing_docstring=4,
+                               default_no_existing_docstring=0):
+        """ Determine the level of indentation of the docstring to match it.
+
+            The indented content after the first line of a docstring can
+            differ based on the nesting of the functionality being documented.
+            For example, a top-level function may have its "Parameters" section
+            indented four-spaces, but a method nested under a class may have
+            its "Parameters" section indented eight spaces. This function
+            determines the indentation level of the first non-whitespace line
+            following the initial summary line.
+        """
+        # if there is no existing docstring, return the corresponding default
+        if len(docstring_lines) == 0:
+            return default_no_existing_docstring
+
+        # if there is an existing docstring with only a single line, return
+        # the corresponding default
+        if len(docstring_lines) == 1:
+            return default_existing_docstring
+
+        # find the first non-blank line (after the initial summary line) and
+        # return the number of leading spaces on that line
+        for line in docstring_lines[1:]:
+            if len(line.strip()) == 0:
+                # ignore blank lines
+                continue
+            else:
+                return len(line) - len(line.lstrip())
+
+        # if there is an existing docstring with only a single non-whitespace
+        # line, return the corresponding default
+        return default_existing_docstring
+
+    def _update_docstring(self, docstring, state_desc,
+                          state_desc_prefix='State: '):
+        # Handle the case of no initial docstring
+        if docstring is None:
+            return "%s%s" % (state_desc_prefix, state_desc)
+
+        docstring_lines = docstring.split('\n')
+        docstring_content_indentation = \
+            self._get_indentation_level(docstring_lines)
+
+        # wrap lines at 79 characters, accounting for the length of
+        # docstring_content_indentation and state_desc_prefix
+        len_state_desc_prefix = len(state_desc_prefix)
+        wrap_at = 79 - (docstring_content_indentation + len_state_desc_prefix)
+        state_desc_lines = textwrap.wrap(state_desc, wrap_at)
+        # The first line of the state description should start with
+        # state_desc_prefix, while the others should start with spaces to align
+        # the text in this section. This is for consistency with numpydoc
+        # formatting of deprecation notices, which are done using the note
+        # Sphinx directive.
+        state_desc_lines[0] = '%s%s%s' % (' ' * docstring_content_indentation,
+                                          state_desc_prefix,
+                                          state_desc_lines[0])
+        header_spaces = ' ' * (docstring_content_indentation +
+                               len_state_desc_prefix)
+        for i, line in enumerate(state_desc_lines[1:], 1):
+            state_desc_lines[i] = '%s%s' % (header_spaces, line)
+
+        new_doc_lines = '\n'.join(state_desc_lines)
+        docstring_lines[0] = '%s\n\n%s' % (docstring_lines[0], new_doc_lines)
+        return '\n'.join(docstring_lines)
+
+    def _validate_kwargs(self, **kwargs):
+        for required_kwarg in self._required_kwargs:
+            if required_kwarg not in kwargs:
+                raise ValueError('%s decorator requires parameter: %s' %
+                                 (self.__class__, required_kwarg))
+
+
+class stable(_state_decorator):
+    """ State decorator indicating stable functionality.
+
+    Used to indicate that public functionality is considered ``stable``,
+    meaning that its API will be backward compatible unless it is deprecated.
+    Decorating functionality as stable will update its doc string to indicate
+    the first version of scikit-bio when the functionality was considered
+    stable.
+
+    Parameters
+    ----------
+    as_of : str
+        First release version where functionality is considered to be stable.
+
+    See Also
+    --------
+    experimental
+    deprecated
+
+    Examples
+    --------
+    >>> @stable(as_of='0.3.0')
+    ... def f_stable():
+    ...     \"\"\" An example stable function.
+    ...     \"\"\"
+    ...     pass
+    >>> help(f_stable)
+    Help on function f_stable in module skbio.util._decorator:
+    <BLANKLINE>
+    f_stable()
+        An example stable function.
+    <BLANKLINE>
+        State: Stable as of 0.3.0.
+    <BLANKLINE>
+    """
+
+    _required_kwargs = ('as_of', )
+
+    def __init__(self, *args, **kwargs):
+        self._validate_kwargs(**kwargs)
+        self.as_of = kwargs['as_of']
+
+    def __call__(self, func):
+        state_desc = 'Stable as of %s.' % self.as_of
+        func.__doc__ = self._update_docstring(func.__doc__, state_desc)
+        return func
+
+
+class experimental(_state_decorator):
+    """ State decorator indicating experimental functionality.
+
+    Used to indicate that public functionality is considered experimental,
+    meaning that its API is subject to change or removal with little or
+    (rarely) no warning. Decorating functionality as experimental will update
+    its doc string to indicate the first version of scikit-bio when the
+    functionality was considered experimental.
+
+    Parameters
+    ----------
+    as_of : str
+        First release version where feature is considered to be experimental.
+
+    See Also
+    --------
+    stable
+    deprecated
+
+    Examples
+    --------
+    >>> @experimental(as_of='0.3.0')
+    ... def f_experimental():
+    ...     \"\"\" An example experimental function.
+    ...     \"\"\"
+    ...     pass
+    >>> help(f_experimental)
+    Help on function f_experimental in module skbio.util._decorator:
+    <BLANKLINE>
+    f_experimental()
+        An example experimental function.
+    <BLANKLINE>
+        State: Experimental as of 0.3.0.
+    <BLANKLINE>
+
+    """
+
+    _required_kwargs = ('as_of', )
+
+    def __init__(self, *args, **kwargs):
+        self._validate_kwargs(**kwargs)
+        self.as_of = kwargs['as_of']
+
+    def __call__(self, func):
+        state_desc = 'Experimental as of %s.' % self.as_of
+        func.__doc__ = self._update_docstring(func.__doc__, state_desc)
+        return func
+
+
+class deprecated(_state_decorator):
+    """ State decorator indicating deprecated functionality.
+
+    Used to indicate that a public class or function is deprecated, meaning
+    that its API will be removed in a future version of scikit-bio. Decorating
+    functionality as deprecated will update its doc string to indicate the
+    first version of scikit-bio when the functionality was deprecated, the
+    first version of scikit-bio when the functionality will no longer exist,
+    and the reason for deprecation of the API. It will also cause calls to the
+    API to raise a ``DeprecationWarning``.
+
+    Parameters
+    ----------
+    as_of : str
+        First development version where feature is considered to be deprecated.
+    until : str
+        First release version where feature will no longer exist.
+    reason : str
+        Brief description of why the API is deprecated.
+
+    See Also
+    --------
+    stable
+    experimental
+
+    Examples
+    --------
+    >>> @deprecated(as_of='0.3.0', until='0.3.3',
+    ...             reason='Use skbio.g().')
+    ... def f_deprecated(x, verbose=False):
+    ...     \"\"\" An example deprecated function.
+    ...     \"\"\"
+    ...     pass
+    >>> help(f_deprecated)
+    Help on function f_deprecated in module skbio.util._decorator:
+    <BLANKLINE>
+    f_deprecated(x, verbose=False)
+        An example deprecated function.
+    <BLANKLINE>
+        .. note:: Deprecated as of 0.3.0 for removal in 0.3.3. Use skbio.g().
+    <BLANKLINE>
+
+    """
+
+    _required_kwargs = ('as_of', 'until', 'reason')
+
+    def __init__(self, *args, **kwargs):
+        self._validate_kwargs(**kwargs)
+        self.as_of = kwargs['as_of']
+        self.until = kwargs['until']
+        self.reason = kwargs['reason']
+
+    def __call__(self, func, *args, **kwargs):
+        state_desc = 'Deprecated as of %s for removal in %s. %s' %\
+            (self.as_of, self.until, self.reason)
+        func.__doc__ = self._update_docstring(func.__doc__, state_desc,
+                                              state_desc_prefix='.. note:: ')
+
+        def wrapped_f(*args, **kwargs):
+            warnings.warn('%s is deprecated as of scikit-bio version %s, and '
+                          'will be removed in version %s. %s' %
+                          (func.__name__, self.as_of, self.until, self.reason),
+                          DeprecationWarning)
+            # decorator.decorator passes the original function as args[0]
+            # when invoking this wrapper, so skip it before delegating.
+            return func(*args[1:], **kwargs)
+
+        return decorator.decorator(wrapped_f, func)
+
+
+# Adapted from http://stackoverflow.com/a/8313042/579416
+def overrides(interface_class):
+    """Decorator for class-level members.
+
+    Used to indicate that a member is being overridden from a specific parent
+    class. If the member does not have a docstring, it will pull one from the
+    parent class. When chaining decorators, this should be first as it is
+    relatively nondestructive.
+
+    Parameters
+    ----------
+    interface_class : class
+        The class which has a member overridden by the decorated member.
+
+    Returns
+    -------
+    function
+        The function is not changed or replaced.
+
+    Raises
+    ------
+    OverrideError
+        If the `interface_class` does not possess a member of the same name
+        as the decorated member.
+
+    """
+    def overrider(method):
+        if method.__name__ not in dir(interface_class):
+            raise OverrideError("%r is not present in parent class: %r." %
+                                (method.__name__, interface_class.__name__))
+        backup = classproperty.__get__
+        classproperty.__get__ = lambda x, y, z: x
+        if method.__doc__ is None:
+            method.__doc__ = getattr(interface_class, method.__name__).__doc__
+        classproperty.__get__ = backup
+        return method
+    return overrider
+
+
+class classproperty(property):
+    """Decorator for class-level properties.
+
+    Supports read access only. The property will be read-only within an
+    instance. However, the property can always be redefined on the class, since
+    Python classes are mutable.
+
+    Parameters
+    ----------
+    func : function
+        Method to make a class property.
+
+    Returns
+    -------
+    property
+        Decorated method.
+
+    Raises
+    ------
+    AttributeError
+        If the property is set on an instance.
+
+    """
+    def __init__(self, func):
+        name = func.__name__
+        doc = func.__doc__
+        super(classproperty, self).__init__(classmethod(func))
+        self.__name__ = name
+        self.__doc__ = doc
+
+    def __get__(self, cls, owner):
+        return self.fget.__get__(None, owner)()
+
+    def __set__(self, obj, value):
+        raise AttributeError("can't set attribute")
diff --git a/skbio/util/_exception.py b/skbio/util/_exception.py
index 328087a..cc251e5 100644
--- a/skbio/util/_exception.py
+++ b/skbio/util/_exception.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -8,7 +6,14 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 
 class TestingUtilError(Exception):
     """Raised when an exception is needed to test exception handling."""
     pass
+
+
+class OverrideError(AssertionError):
+    """Raised when a property does not exist in the parent class."""
+    pass
diff --git a/skbio/util/_misc.py b/skbio/util/_misc.py
index 90259f4..b79521c 100644
--- a/skbio/util/_misc.py
+++ b/skbio/util/_misc.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -8,12 +6,93 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 import hashlib
 from os import remove, makedirs
 from os.path import exists, isdir
 from functools import partial
+from types import FunctionType
+import inspect
+from ._decorator import experimental, deprecated
+
+
+def make_sentinel(name):
+    return type(name, (object, ), {
+        '__repr__': lambda s: name,
+        '__str__': lambda s: name,
+        '__class__': None
+    })()
+
+
+def find_sentinels(function, sentinel):
+    keys = []
+    function_spec = inspect.getargspec(function)
+    if function_spec.defaults is not None:
+        # Concept from http://stackoverflow.com/a/12627202/579416
+        keywords_start = -len(function_spec.defaults)
+        for key, default in zip(function_spec.args[keywords_start:],
+                                function_spec.defaults):
+            if default is sentinel:
+                keys.append(key)
+    return keys
+
+
+class MiniRegistry(dict):
+    def __call__(self, name):
+        """Act as a decorator to register functions with self"""
+        def decorator(func):
+            self[name] = func
+            return func
+        return decorator
+
+    def copy(self):
+        """Useful for inheritance"""
+        return self.__class__(super(MiniRegistry, self).copy())
+
+    def formatted_listing(self):
+        """Produce an RST list with descriptions."""
+        if len(self) == 0:
+            return "\tNone"
+        else:
+            return "\n".join(["\t%r\n\t  %s" %
+                             (name, self[name].__doc__.split("\n")[0])
+                              for name in sorted(self)])
+
+    def interpolate(self, obj, name):
+        """Inject the formatted listing in the second blank line of `name`."""
+        # Py2/3 compatible way of calling getattr(obj, name).__func__
+        f = getattr(obj, name).__get__(None, type(None))
+
+        if hasattr(f, 'func_code'):
+            f2 = FunctionType(f.func_code, f.func_globals, name=f.func_name,
+                              argdefs=f.func_defaults, closure=f.func_closure)
+        else:
+            f2 = FunctionType(f.__code__, f.__globals__, name=f.__name__,
+                              argdefs=f.__defaults__, closure=f.__closure__)
+        # Conveniently the original docstring is on f2, not the new ones if
+        # inheritance is happening. I have no idea why.
+        t = f2.__doc__.split("\n\n")
+        t.insert(2, self.formatted_listing())
+        f2.__doc__ = "\n\n".join(t)
+
+        setattr(obj, name, f2)
 
 
+def chunk_str(s, n, char):
+    """Insert `char` character every `n` characters in string `s`.
+
+    Canonically pronounced "chunkster".
+
+    """
+    # Modified from http://stackoverflow.com/a/312464/3776794
+    if n < 1:
+        raise ValueError(
+            "Cannot split string into chunks with n=%d. n must be >= 1." % n)
+    return char.join((s[i:i+n] for i in range(0, len(s), n)))
+
+
+ at experimental(as_of="0.4.0")
 def cardinal_to_ordinal(n):
     """Return ordinal string version of cardinal int `n`.
 
@@ -57,6 +136,7 @@ def cardinal_to_ordinal(n):
     return "%d%s" % (n, "tsnrhtdd"[(n//10 % 10 != 1)*(n % 10 < 4)*n % 10::4])
 
 
+ at experimental(as_of="0.4.0")
 def is_casava_v180_or_later(header_line):
     """Check if the header looks like it is Illumina software post-casava v1.8
 
@@ -87,6 +167,7 @@ def is_casava_v180_or_later(header_line):
     return len(fields) == 10 and fields[7] in b'YN'
 
 
+ at experimental(as_of="0.4.0")
 def safe_md5(open_file, block_size=2 ** 20):
     """Computes an md5 sum without loading the file into memory
 
@@ -128,6 +209,7 @@ def safe_md5(open_file, block_size=2 ** 20):
     return md5
 
 
+ at experimental(as_of="0.4.0")
 def remove_files(list_of_filepaths, error_on_missing=True):
     """Remove list of filepaths, optionally raising an error if any are missing
 
@@ -169,6 +251,7 @@ def remove_files(list_of_filepaths, error_on_missing=True):
                       '\t'.join(missing))
 
 
+ at experimental(as_of="0.4.0")
 def create_dir(dir_name, fail_on_exist=False, handle_errors_externally=False):
     """Create a directory safely and fail meaningfully
 
@@ -238,6 +321,7 @@ def create_dir(dir_name, fail_on_exist=False, handle_errors_externally=False):
     return error_code_lookup['NO_ERROR']
 
 
+ at experimental(as_of="0.4.0")
 def find_duplicates(iterable):
     """Find duplicate elements in an iterable.
 
@@ -264,7 +348,15 @@ def find_duplicates(iterable):
             seen.add(e)
     return repeated
 
+flatten_deprecation_reason = (
+    "Solutions to this problem exist in the python standard library. "
+    "Please refer to the following links for good alternatives:\n"
+    "http://stackoverflow.com/a/952952/3639023\n"
+    "http://stackoverflow.com/a/406199/3639023")
+
 
+ at deprecated(as_of="0.2.3-dev", until="0.4.1",
+            reason=flatten_deprecation_reason)
 def flatten(items):
     """Removes one level of nesting from items
 
diff --git a/skbio/util/_testing.py b/skbio/util/_testing.py
index b9495bd..a8e616e 100644
--- a/skbio/util/_testing.py
+++ b/skbio/util/_testing.py
@@ -6,10 +6,68 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 import os
 import inspect
 
+import pandas.util.testing as pdt
+from nose import core
+from nose.tools import nottest
+from future.utils import PY3
+
+from ._decorator import experimental
+
+
+ at nottest
+class TestRunner(object):
+    """Simple wrapper class around nosetests functionality.
+
+    Parameters
+    ----------
+    filename : str
+        __file__ attribute passed in from the caller. This tells the
+        tester where to start looking for tests.
+
+    Notes
+    -----
+    The primary purpose of this class is to create an interface which users
+    of scikit-bio can use to run all of the built in tests. Normally this
+    would be done by invoking nosetests directly from the command line, but
+    scikit-bio needs several additional options which make the command long
+    and ugly. This class invokes nose with the required options.
+
+    """
+    @experimental(as_of="0.4.0")
+    def __init__(self, filename):
+        self._filename = filename
+        self._test_dir = os.path.dirname(filename)
+
+    @experimental(as_of="0.4.0")
+    def test(self, verbose=False):
+        """Performs the actual running of the tests.
+
+        Parameters
+        ----------
+        verbose : bool
+            flag for running in verbose mode.
+
+        Returns
+        -------
+        bool
+            test run success status
+        """
+        # NOTE: it doesn't seem to matter what the first element of the argv
+        # list is, there just needs to be something there.
+        argv = [self._filename, '-I DO_NOT_IGNORE_ANYTHING']
+        if not PY3:
+            argv.extend(['--with-doctest', '--doctest-tests'])
+        if verbose:
+            argv.append('-v')
+        return core.run(argv=argv, defaultTest=self._test_dir)
 
+
+ at experimental(as_of="0.4.0")
 def get_data_path(fn, subfolder='data'):
     """Return path to filename ``fn`` in the data folder.
 
@@ -45,3 +103,57 @@ def get_data_path(fn, subfolder='data'):
     path = os.path.dirname(os.path.abspath(callers_filename))
     data_path = os.path.join(path, subfolder, fn)
     return data_path
+
+
+ at experimental(as_of="0.4.0")
+def assert_data_frame_almost_equal(left, right):
+    """Raise AssertionError if ``pd.DataFrame`` objects are not "almost equal".
+
+    Wrapper of ``pd.util.testing.assert_frame_equal``. Floating point values
+    are considered "almost equal" if they are within a threshold defined by
+    ``assert_frame_equal``. This wrapper uses a number of
+    checks that are turned off by default in ``assert_frame_equal`` in order to
+    perform stricter comparisons (for example, ensuring the index and column
+    types are the same). It also does not consider empty ``pd.DataFrame``
+    objects equal if they have a different index.
+
+    Other notes:
+
+    * Index (row) and column ordering must be the same for objects to be equal.
+    * NaNs (``np.nan``) in the same locations are considered equal.
+
+    This is a helper function intended to be used in unit tests that need to
+    compare ``pd.DataFrame`` objects.
+
+    Parameters
+    ----------
+    left, right : pd.DataFrame
+        ``pd.DataFrame`` objects to compare.
+
+    Raises
+    ------
+    AssertionError
+        If `left` and `right` are not "almost equal".
+
+    See Also
+    --------
+    pandas.util.testing.assert_frame_equal
+
+    """
+    # pass all kwargs to ensure this function has consistent behavior even if
+    # `assert_frame_equal`'s defaults change
+    pdt.assert_frame_equal(left, right,
+                           check_dtype=True,
+                           check_index_type=True,
+                           check_column_type=True,
+                           check_frame_type=True,
+                           check_less_precise=False,
+                           check_names=True,
+                           by_blocks=False,
+                           check_exact=False)
+    # this check ensures that empty DataFrames with different indices do not
+    # compare equal. exact=True specifies that the type of the indices must be
+    # exactly the same
+    pdt.assert_index_equal(left.index, right.index,
+                           exact=True,
+                           check_names=True)
diff --git a/skbio/util/_warning.py b/skbio/util/_warning.py
index 91cb141..a69f14f 100644
--- a/skbio/util/_warning.py
+++ b/skbio/util/_warning.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -8,6 +6,8 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 
 class EfficiencyWarning(Warning):
     """Warn about potentially accidental use of inefficient code.
diff --git a/skbio/util/tests/__init__.py b/skbio/util/tests/__init__.py
index c99682c..3fe3dc6 100644
--- a/skbio/util/tests/__init__.py
+++ b/skbio/util/tests/__init__.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -7,3 +5,5 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
diff --git a/skbio/util/tests/test_decorator.py b/skbio/util/tests/test_decorator.py
new file mode 100644
index 0000000..eef2d1f
--- /dev/null
+++ b/skbio/util/tests/test_decorator.py
@@ -0,0 +1,275 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+import unittest
+import inspect
+import warnings
+
+from skbio.util._decorator import classproperty, overrides
+from skbio.util._decorator import (stable, experimental, deprecated,
+                                   _state_decorator)
+from skbio.util._exception import OverrideError
+
+
+class TestOverrides(unittest.TestCase):
+    def test_raises_when_missing(self):
+        class A(object):
+            pass
+
+        with self.assertRaises(OverrideError):
+            class B(A):
+                @overrides(A)
+                def test(self):
+                    pass
+
+    def test_doc_inherited(self):
+        class A(object):
+            def test(self):
+                """Docstring"""
+                pass
+
+        class B(A):
+            @overrides(A)
+            def test(self):
+                pass
+
+        self.assertEqual(B.test.__doc__, "Docstring")
+
+    def test_doc_not_inherited(self):
+        class A(object):
+            def test(self):
+                """Docstring"""
+                pass
+
+        class B(A):
+            @overrides(A)
+            def test(self):
+                """Different"""
+                pass
+
+        self.assertEqual(B.test.__doc__, "Different")
+
+
+class TestClassProperty(unittest.TestCase):
+    def test_getter_only(self):
+        class Foo(object):
+            _foo = 42
+
+            @classproperty
+            def foo(cls):
+                return cls._foo
+
+        # class-level getter
+        self.assertEqual(Foo.foo, 42)
+
+        # instance-level getter
+        f = Foo()
+        self.assertEqual(f.foo, 42)
+
+        with self.assertRaises(AttributeError):
+            f.foo = 4242
+
+
+class TestStabilityState(unittest.TestCase):
+    # the indentation spacing gets weird, so I'm defining the
+    # input doc string explicitly and adding it after function
+    # definition
+    _test_docstring = (" Add 42, or something else, to x.\n"
+                       "\n"
+                       "    Parameters\n"
+                       "    ----------\n"
+                       "    x : int, x\n"
+                       "    y : int, optional\n")
+
+
+class TestBase(TestStabilityState):
+
+    def test_get_indentation_level(self):
+
+        c = _state_decorator()
+        self.assertEqual(c._get_indentation_level([]), 0)
+        self.assertEqual(
+            c._get_indentation_level([], default_no_existing_docstring=3), 3)
+        self.assertEqual(c._get_indentation_level([""]), 4)
+        self.assertEqual(
+            c._get_indentation_level([""], default_existing_docstring=3), 3)
+
+        in_ = (["summary"])
+        self.assertEqual(c._get_indentation_level(in_), 4)
+        in_ = (["summary", "", "", "    ", "", " ", ""])
+        self.assertEqual(c._get_indentation_level(in_), 4)
+
+        in_ = (["summary", "     More indentation", " Less indentation"])
+        self.assertEqual(c._get_indentation_level(in_), 5)
+
+    def test_update_docstring(self):
+        c = _state_decorator()
+        in_ = None
+        exp = ("""State: Test!!""")
+        self.assertEqual(c._update_docstring(in_, "Test!!"), exp)
+
+        in_ = """"""
+        exp = ("""\n\n    State: Test!!""")
+        self.assertEqual(c._update_docstring(in_, "Test!!"), exp)
+
+        in_ = ("""Short summary\n\n    Parameters\n\n----------\n    """
+               """x : int\n""")
+        exp = ("""Short summary\n\n    State: Test!!\n\n"""
+               """    Parameters\n\n----------\n    x : int\n""")
+        self.assertEqual(c._update_docstring(in_, "Test!!"), exp)
+
+        in_ = ("""Short summary\n\n      Parameters\n\n----------\n      """
+               """x : int\n""")
+        exp = ("""Short summary\n\n      State: Test!!\n\n"""
+               """      Parameters\n\n----------\n      x : int\n""")
+        self.assertEqual(c._update_docstring(in_, "Test!!"), exp)
+
+        in_ = ("""Short summary\n\n    Parameters\n\n----------\n    """
+               """x : int\n""")
+        exp = ("""Short summary\n\n    State: Test!!Test!!Test!!Test!!Test!!"""
+               """Test!!Test!!Test!!Test!!Test!!Test!!Te\n           st!!T"""
+               """est!!Test!!Test!!Test!!Test!!Test!!Test!!Test!!\n\n"""
+               """    Parameters\n\n----------\n    x : int\n""")
+        self.assertEqual(c._update_docstring(in_, "Test!!"*20), exp)
+
+
+class TestStable(TestStabilityState):
+
+    def _get_f(self, as_of):
+        def f(x, y=42):
+            return x + y
+        f.__doc__ = self._test_docstring
+        f = stable(as_of=as_of)(f)
+        return f
+
+    def test_function_output(self):
+        f = self._get_f('0.1.0')
+        self.assertEqual(f(1), 43)
+
+    def test_function_docstring(self):
+        f = self._get_f('0.1.0')
+        e1 = (" Add 42, or something else, to x.\n\n"
+              "    State: Stable as of 0.1.0.\n\n"
+              "    Parameters")
+        self.assertTrue(f.__doc__.startswith(e1))
+        f = self._get_f('0.1.1')
+        e1 = (" Add 42, or something else, to x.\n\n"
+              "    State: Stable as of 0.1.1.\n\n"
+              "    Parameters")
+        self.assertTrue(f.__doc__.startswith(e1))
+
+    def test_function_signature(self):
+        f = self._get_f('0.1.0')
+        expected = inspect.ArgSpec(
+            args=['x', 'y'], varargs=None, keywords=None, defaults=(42,))
+        self.assertEqual(inspect.getargspec(f), expected)
+        self.assertEqual(f.__name__, 'f')
+
+    def test_missing_kwarg(self):
+        self.assertRaises(ValueError, stable)
+        self.assertRaises(ValueError, stable, '0.1.0')
+
+
+class TestExperimental(TestStabilityState):
+
+    def _get_f(self, as_of):
+        def f(x, y=42):
+            return x + y
+        f.__doc__ = self._test_docstring
+        f = experimental(as_of=as_of)(f)
+        return f
+
+    def test_function_output(self):
+        f = self._get_f('0.1.0')
+        self.assertEqual(f(1), 43)
+
+    def test_function_docstring(self):
+        f = self._get_f('0.1.0')
+        e1 = (" Add 42, or something else, to x.\n\n"
+              "    State: Experimental as of 0.1.0.\n\n"
+              "    Parameters")
+        self.assertTrue(f.__doc__.startswith(e1))
+        f = self._get_f('0.1.1')
+        e1 = (" Add 42, or something else, to x.\n\n"
+              "    State: Experimental as of 0.1.1.\n\n"
+              "    Parameters")
+        self.assertTrue(f.__doc__.startswith(e1))
+
+    def test_function_signature(self):
+        f = self._get_f('0.1.0')
+        expected = inspect.ArgSpec(
+            args=['x', 'y'], varargs=None, keywords=None, defaults=(42,))
+        self.assertEqual(inspect.getargspec(f), expected)
+        self.assertEqual(f.__name__, 'f')
+
+    def test_missing_kwarg(self):
+        self.assertRaises(ValueError, experimental)
+        self.assertRaises(ValueError, experimental, '0.1.0')
+
+
+class TestDeprecated(TestStabilityState):
+
+    def _get_f(self, as_of, until, reason):
+        def f(x, y=42):
+            return x + y
+        f.__doc__ = self._test_docstring
+        f = deprecated(as_of=as_of, until=until, reason=reason)(f)
+        return f
+
+    def test_function_output(self):
+        f = self._get_f('0.1.0', until='0.1.4',
+                        reason='You should now use skbio.g().')
+        self.assertEqual(f(1), 43)
+
+    def test_deprecation_warning(self):
+        f = self._get_f('0.1.0', until='0.1.4',
+                        reason='You should now use skbio.g().')
+        # adapted from SO example here: http://stackoverflow.com/a/3892301
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter("always")
+            f(1)
+            self.assertTrue(issubclass(w[0].category, DeprecationWarning))
+            expected_str = "is deprecated as of scikit-bio version 0.1.0"
+            self.assertTrue(expected_str in str(w[0].message))
+
+    def test_function_docstring(self):
+        f = self._get_f('0.1.0', until='0.1.4',
+                        reason='You should now use skbio.g().')
+        e1 = (" Add 42, or something else, to x.\n\n"
+              "    .. note:: Deprecated as of 0.1.0 for "
+              "removal in 0.1.4. You should now use\n"
+              "              skbio.g().\n\n"
+              "    Parameters")
+        self.assertTrue(f.__doc__.startswith(e1))
+
+        f = self._get_f('0.1.1', until='0.1.5',
+                        reason='You should now use skbio.h().')
+        e1 = (" Add 42, or something else, to x.\n\n"
+              "    .. note:: Deprecated as of 0.1.1 for "
+              "removal in 0.1.5. You should now use\n"
+              "              skbio.h().\n\n"
+              "    Parameters")
+        self.assertTrue(f.__doc__.startswith(e1))
+
+    def test_function_signature(self):
+        f = self._get_f('0.1.0', until='0.1.4',
+                        reason='You should now use skbio.g().')
+        expected = inspect.ArgSpec(
+            args=['x', 'y'], varargs=None, keywords=None, defaults=(42,))
+        self.assertEqual(inspect.getargspec(f), expected)
+        self.assertEqual(f.__name__, 'f')
+
+    def test_missing_kwarg(self):
+        self.assertRaises(ValueError, deprecated)
+        self.assertRaises(ValueError, deprecated, '0.1.0')
+        self.assertRaises(ValueError, deprecated, as_of='0.1.0')
+        self.assertRaises(ValueError, deprecated, as_of='0.1.0', until='0.1.4')
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/skbio/util/tests/test_misc.py b/skbio/util/tests/test_misc.py
index 9e5b5fb..6897bf3 100644
--- a/skbio/util/tests/test_misc.py
+++ b/skbio/util/tests/test_misc.py
@@ -8,21 +8,157 @@
 
 from __future__ import absolute_import, division, print_function
 from future.builtins import range
+import six
 from six import BytesIO
 
+import unittest
 from tempfile import NamedTemporaryFile, mkdtemp
 from os.path import exists, join
-from unittest import TestCase, main
 from shutil import rmtree
 from uuid import uuid4
 
 from skbio.util import (cardinal_to_ordinal, safe_md5, remove_files,
                         create_dir, find_duplicates, flatten,
                         is_casava_v180_or_later)
-from skbio.util._misc import _handle_error_codes
+from skbio.util._misc import _handle_error_codes, MiniRegistry, chunk_str
 
 
-class MiscTests(TestCase):
+class TestMiniRegistry(unittest.TestCase):
+    """Tests for skbio.util._misc.MiniRegistry.
+
+    Covers decorator-based registration, independent copies, and docstring
+    interpolation of registered entries into a class's method docs.
+    """
+
+    def setUp(self):
+        # Fresh registry per test so registrations don't leak between tests.
+        self.registry = MiniRegistry()
+
+    def test_decoration(self):
+        """Registering via decorator stores the function under the given key
+        and leaves it callable."""
+        self.assertNotIn("name1", self.registry)
+        self.assertNotIn("name2", self.registry)
+        # Flags set on self so the inner registered functions can signal
+        # that they were actually invoked.
+        self.n1_called = False
+        self.n2_called = False
+
+        @self.registry("name1")
+        def some_registration1():
+            self.n1_called = True
+
+        @self.registry("name2")
+        def some_registration2():
+            self.n2_called = True
+
+        # The registry should map each key to the exact function object.
+        self.assertIn("name1", self.registry)
+        self.assertEqual(some_registration1, self.registry["name1"])
+        self.assertIn("name2", self.registry)
+        self.assertEqual(some_registration2, self.registry["name2"])
+
+        # Calling through the registry must execute the registered function.
+        self.registry["name1"]()
+        self.assertTrue(self.n1_called)
+        self.registry["name2"]()
+        self.assertTrue(self.n2_called)
+
+    def test_copy(self):
+        """copy() yields an independent registry: additions to the copy must
+        not appear in the original, while existing entries are shared."""
+        @self.registry("name")
+        def some_registration():
+            pass
+
+        new = self.registry.copy()
+        self.assertIsNot(new, self.registry)
+
+        @new("other")
+        def other_registration():
+            pass
+
+        # Original keeps only its own entry...
+        self.assertIn("name", self.registry)
+        self.assertNotIn("other", self.registry)
+
+        # ...while the copy has both the inherited and the new entry.
+        self.assertIn("other", new)
+        self.assertIn("name", new)
+
+    def test_everything(self):
+        """End-to-end: interpolate() injects the registered names and their
+        docstrings into a target method's docstring, leaving other methods
+        untouched, and copies interpolate independently of the original."""
+        class SomethingToInterpolate(object):
+            def interpolate_me():
+                """First line
+
+                Some description of things, also this:
+
+                Other things are happening now.
+                """
+
+            def dont_interpolate_me():
+                """First line
+
+                Some description of things, also this:
+
+                Other things are happening now.
+                """
+
+        class Subclass(SomethingToInterpolate):
+            pass
+
+        # Three entries in the base registry ("a"/"b"/"c" with one-char docs).
+        @self.registry("a")
+        def a():
+            """x"""
+
+        @self.registry("b")
+        def b():
+            """y"""
+
+        @self.registry("c")
+        def c():
+            """z"""
+
+        # The copy gains an extra entry "o"; the original must not see it.
+        subclass_registry = self.registry.copy()
+
+        @subclass_registry("o")
+        def o():
+            """p"""
+
+        self.registry.interpolate(SomethingToInterpolate, "interpolate_me")
+        subclass_registry.interpolate(Subclass, "interpolate_me")
+
+        # Base class doc lists only 'a'/'b'/'c', each as a tab-indented name
+        # followed by its docstring, inserted between the description and
+        # the trailing paragraph.
+        self.assertEqual(SomethingToInterpolate.interpolate_me.__doc__,
+                         "First line\n\n                Some description of th"
+                         "ings, also this:\n\n\t'a'\n\t  x\n\t'b'\n\t  y\n\t'c"
+                         "'\n\t  z\n\n                Other things are happeni"
+                         "ng now.\n                ")
+        # The non-targeted method's docstring is left exactly as written.
+        self.assertEqual(SomethingToInterpolate.dont_interpolate_me.__doc__,
+                         "First line\n\n                Some description of th"
+                         "ings, also this:\n\n                Other things are"
+                         " happening now.\n                ")
+        # Subclass interpolated with the copied registry also lists 'o'.
+        self.assertEqual(Subclass.interpolate_me.__doc__,
+                         "First line\n\n                Some description of th"
+                         "ings, also this:\n\n\t'a'\n\t  x\n\t'b'\n\t  y\n\t'c"
+                         "'\n\t  z\n\t'o'\n\t  p\n\n                Other thin"
+                         "gs are happening now.\n                ")
+        self.assertEqual(Subclass.dont_interpolate_me.__doc__,
+                         "First line\n\n                Some description of th"
+                         "ings, also this:\n\n                Other things are"
+                         " happening now.\n                ")
+
+
+class ChunkStrTests(unittest.TestCase):
+    """Tests for skbio.util._misc.chunk_str(s, n, char).
+
+    chunk_str splits ``s`` into pieces of length ``n`` joined by ``char``;
+    these tests cover even splits, no-op cases, a short final chunk, and
+    rejection of non-positive ``n``.
+    """
+
+    def test_even_split(self):
+        self.assertEqual(chunk_str('abcdef', 6, ' '), 'abcdef')
+        self.assertEqual(chunk_str('abcdef', 3, ' '), 'abc def')
+        self.assertEqual(chunk_str('abcdef', 2, ' '), 'ab cd ef')
+        self.assertEqual(chunk_str('abcdef', 1, ' '), 'a b c d e f')
+        self.assertEqual(chunk_str('a', 1, ' '), 'a')
+        # Empty separator: chunks are concatenated back unchanged.
+        self.assertEqual(chunk_str('abcdef', 2, ''), 'abcdef')
+
+    def test_no_split(self):
+        # Empty input, and inputs shorter than n, come back unmodified.
+        self.assertEqual(chunk_str('', 2, '\n'), '')
+        self.assertEqual(chunk_str('a', 100, '\n'), 'a')
+        self.assertEqual(chunk_str('abcdef', 42, '|'), 'abcdef')
+
+    def test_uneven_split(self):
+        # When len(s) is not a multiple of n, the last chunk is shorter.
+        self.assertEqual(chunk_str('abcdef', 5, '|'), 'abcde|f')
+        self.assertEqual(chunk_str('abcdef', 4, '|'), 'abcd|ef')
+        self.assertEqual(chunk_str('abcdefg', 3, ' - '), 'abc - def - g')
+
+    def test_invalid_n(self):
+        # n must be positive; the error message is expected to echo n.
+        # six.assertRaisesRegex is the py2/py3-portable spelling.
+        with six.assertRaisesRegex(self, ValueError, 'n=0'):
+            chunk_str('abcdef', 0, ' ')
+
+        with six.assertRaisesRegex(self, ValueError, 'n=-42'):
+            chunk_str('abcdef', -42, ' ')
+
+
+class MiscTests(unittest.TestCase):
     def setUp(self):
         self.dirs_to_remove = []
 
@@ -107,7 +243,7 @@ class MiscTests(TestCase):
         self.assertEqual(flatten([1, [2, 3], [[4, [5]]]]), [1, 2, 3, [4, [5]]])
 
 
-class CardinalToOrdinalTests(TestCase):
+class CardinalToOrdinalTests(unittest.TestCase):
     def test_valid_range(self):
         # taken and modified from http://stackoverflow.com/a/20007730/3776794
         exp = ['0th', '1st', '2nd', '3rd', '4th', '5th', '6th', '7th', '8th',
@@ -120,11 +256,11 @@ class CardinalToOrdinalTests(TestCase):
         self.assertEqual(obs, exp)
 
     def test_invalid_n(self):
-        with self.assertRaisesRegexp(ValueError, '-1'):
+        with six.assertRaisesRegex(self, ValueError, '-1'):
             cardinal_to_ordinal(-1)
 
 
-class TestFindDuplicates(TestCase):
+class TestFindDuplicates(unittest.TestCase):
     def test_empty_input(self):
         def empty_gen():
             raise StopIteration()
@@ -157,4 +293,4 @@ class TestFindDuplicates(TestCase):
 
 
 if __name__ == '__main__':
-    main()
+    unittest.main()
diff --git a/skbio/util/tests/test_testing.py b/skbio/util/tests/test_testing.py
index e0b6edd..b6adf52 100644
--- a/skbio/util/tests/test_testing.py
+++ b/skbio/util/tests/test_testing.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-from __future__ import absolute_import, division, print_function
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -8,21 +6,83 @@ from __future__ import absolute_import, division, print_function
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 import os
+import itertools
+import unittest
+
+import pandas as pd
+import numpy as np
+
+from skbio.util import get_data_path, assert_data_frame_almost_equal
+
+
+class TestGetDataPath(unittest.TestCase):
+    """Test for skbio.util.get_data_path."""
+
+    def test_get_data_path(self):
+        """get_data_path(fn) should resolve to <this test dir>/data/<fn>."""
+        fn = 'parrot'
+        # Build the expected path relative to this test module's location.
+        path = os.path.dirname(os.path.abspath(__file__))
+        data_path = os.path.join(path, 'data', fn)
+        data_path_2 = get_data_path(fn)
+        self.assertEqual(data_path_2, data_path)
+
+
+class TestAssertDataFrameAlmostEqual(unittest.TestCase):
+    """Tests for skbio.util.assert_data_frame_almost_equal.
+
+    The assertion should tolerate tiny floating-point differences but
+    reject everything else: NaN mismatches, column/index order, index
+    dtype, and differently-shaped "empty" DataFrames.
+    """
+
+    def setUp(self):
+        # Reference frame mixing ints, floats, NaN, and strings.
+        self.df = pd.DataFrame(
+            {'foo': [42, 42.0, np.nan, 0],
+             'bar': ['a', 'b', 'cd', 'e']})
+
+    def test_not_equal(self):
+        unequal_dfs = [
+            self.df,
+            # floating point error too large to be "almost equal"
+            pd.DataFrame({'foo': [42, 42.001, np.nan, 0],
+                          'bar': ['a', 'b', 'cd', 'e']}),
+            # extra NaN
+            pd.DataFrame({'foo': [42, np.nan, np.nan, 0],
+                          'bar': ['a', 'b', 'cd', 'e']}),
+            # different column order
+            pd.DataFrame(self.df, columns=['foo', 'bar']),
+            # different index order
+            pd.DataFrame(self.df, index=np.arange(4)[::-1]),
+            # different index type
+            pd.DataFrame(self.df, index=np.arange(4).astype(float)),
+            # various forms of "empty" DataFrames that are not equivalent
+            pd.DataFrame(),
+            pd.DataFrame(index=np.arange(10)),
+            pd.DataFrame(columns=np.arange(10)),
+            pd.DataFrame(index=np.arange(10), columns=np.arange(10)),
+            pd.DataFrame(index=np.arange(9)),
+            pd.DataFrame(columns=np.arange(9)),
+            pd.DataFrame(index=np.arange(9), columns=np.arange(9))
+        ]
+
+        # each df should compare equal to itself
+        for df in unequal_dfs:
+            assert_data_frame_almost_equal(df, df)
 
-import numpy.testing as npt
+        # every pair of dfs should not compare equal. use permutations instead
+        # of combinations to test that comparing df1 to df2 and df2 to df1 are
+        # both not equal
+        for df1, df2 in itertools.permutations(unequal_dfs, 2):
+            with self.assertRaises(AssertionError):
+                assert_data_frame_almost_equal(df1, df2)
 
-from skbio.util import get_data_path
+    def test_equal(self):
+        equal_dfs = [
+            self.df,
+            # floating point error small enough to be "almost equal"
+            pd.DataFrame({'foo': [42, 42.00001, np.nan, 0],
+                          'bar': ['a', 'b', 'cd', 'e']})
+        ]
 
+        # Reflexive equality for each frame...
+        for df in equal_dfs:
+            assert_data_frame_almost_equal(df, df)
 
-def test_get_data_path():
-    fn = 'parrot'
-    path = os.path.dirname(os.path.abspath(__file__))
-    data_path = os.path.join(path, 'data', fn)
-    data_path_2 = get_data_path(fn)
-    npt.assert_string_equal(data_path_2, data_path)
+        # ...and symmetric "almost equal" across the pair in both directions.
+        for df1, df2 in itertools.permutations(equal_dfs, 2):
+            assert_data_frame_almost_equal(df1, df2)
 
 
 if __name__ == '__main__':
-    import nose
-    nose.runmodule()
+    unittest.main()
diff --git a/skbio/workflow.py b/skbio/workflow.py
index fb68387..8d6778e 100644
--- a/skbio/workflow.py
+++ b/skbio/workflow.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 r"""
 Constructing workflows (:mod:`skbio.workflow`)
 ==============================================
@@ -90,7 +89,7 @@ exciting on success.
 Now, lets process some data!
 
 >>> for result in wf(seqs, success_callback=success_f, fail_callback=fail_f):
-...     print result
+...     print(result)
 SUCCESS: AAAAAAATTTTTTT
 FAIL: ATAGACC
 SUCCESS: CCGGAC
@@ -111,9 +110,9 @@ to walk through an item at a time so we can examine the debug information.
 >>> gen = wf(seqs, fail_callback=lambda x: x.state)
 >>> gen.next()
 'TTTTTTTAAAAAAA'
->>> print wf.failed
+>>> print(wf.failed)
 False
->>> print wf.debug_trace
+>>> print(wf.debug_trace)
 set([('check_length', 0), ('reverse', 2)])
 
 The ``debug_trace`` specifies the methods executed, and the order of their
@@ -128,9 +127,9 @@ workflow was a failed item.
 
 >>> gen.next()
 'ATAGACC'
->>> print wf.failed
+>>> print(wf.failed)
 True
->>> print wf.debug_trace
+>>> print(wf.debug_trace)
 set([('check_length', 0)])
 
 What we can see is that the failed sequence only executed the check_length
@@ -144,7 +143,7 @@ truncation. Let's see what that looks like in the debug output.
 
 >>> gen.next() #
 'CAGGCC'
->>> print wf.failed
+>>> print(wf.failed)
 False
 >>> wf.debug_trace
 set([('check_length', 0), ('truncate', 1), ('reverse', 2)])
@@ -201,6 +200,8 @@ allow you to indicate ``anything`` as an option value, anything that is
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import absolute_import, division, print_function
+
 from future.utils import viewitems
 
 import sys
@@ -210,12 +211,18 @@ from functools import update_wrapper
 from collections import Iterable
 from types import MethodType
 
+import six
+
+from skbio.util._decorator import experimental
+
 
 class NotExecuted(object):
     """Helper object to track if a method was executed"""
+    @experimental(as_of="0.4.0")
     def __init__(self):
         self.msg = None
 
+    @experimental(as_of="0.4.0")
     def __call__(self, msg):
         self.msg = msg
         return self
@@ -224,12 +231,14 @@ _not_executed = NotExecuted()
 
 class Exists(object):
     """Stub object to assist with ``requires`` when a value exists"""
+    @experimental(as_of="0.4.0")
     def __contains__(self, item):
         return True
 anything = Exists()  # external, for when a value can be anything
 
 
 class NotNone(object):
+    @experimental(as_of="0.4.0")
     def __contains__(self, item):
         if item is None:
             return False
@@ -269,16 +278,9 @@ class Workflow(object):
         This is handy if additional contextual information is needed by a
         workflow method (e.g., a lookup table).
 
-    Attributes
-    ----------
-    state
-    short_circuit
-    debug
-    options
-    failed
-
     """
 
+    @experimental(as_of="0.4.0")
     def __init__(self, state, short_circuit=True, debug=False, options=None,
                  **kwargs):
         r"""Build thy workflow of self"""
@@ -301,6 +303,7 @@ class Workflow(object):
         if self.debug:
             self._setup_debug()
 
+    @experimental(as_of="0.4.0")
     def initialize_state(self, item):
         """Initialize state
 
@@ -375,6 +378,7 @@ class Workflow(object):
         self.debug_pre_state = {}
         self.debug_post_state = {}
 
+    @experimental(as_of="0.4.0")
     def __call__(self, iter_, success_callback=None, fail_callback=None):
         """Operate on all the data
 
@@ -392,7 +396,6 @@ class Workflow(object):
         fail_callback : method to call on a failed item prior to yielding. By
             default, failures are ignored.
 
-        .. shownumpydoc
         """
         if success_callback is None:
             def success_callback(x):
@@ -458,9 +461,11 @@ class method(object):
     """
     highest_priority = sys.maxsize
 
+    @experimental(as_of="0.4.0")
     def __init__(self, priority=0):
         self.priority = priority
 
+    @experimental(as_of="0.4.0")
     def __call__(self, func):
         func.priority = self.priority
         return func
@@ -489,6 +494,7 @@ class requires(object):
         requirement is not satisfied. This method will be passed the
         containing ``Workflow``s' ``state`` member variable.
     """
+    @experimental(as_of="0.4.0")
     def __init__(self, option=None, values=anything, state=None):
         # self here is the requires object
         self.option = option
@@ -501,13 +507,14 @@ class requires(object):
         elif isinstance(values, set):
             self.values = values
         else:
-            if isinstance(values, str):
+            if isinstance(values, six.string_types):
                 self.values = values
             elif isinstance(values, Iterable):
                 self.values = set(values)
             else:
                 self.values = set([values])
 
+    @experimental(as_of="0.4.0")
     def __call__(self, func):
         """Wrap a function
 

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/python-skbio.git



More information about the debian-med-commit mailing list