[med-svn] [python-biom-format] 03/05: Imported Upstream version 2.1.5+dfsg

Andreas Tille tille at debian.org
Thu Feb 11 16:02:31 UTC 2016


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository python-biom-format.

commit 20df9f62b78613ceed9c7049ea158b13e0702774
Author: Andreas Tille <tille at debian.org>
Date:   Thu Feb 11 12:43:22 2016 +0100

    Imported Upstream version 2.1.5+dfsg
---
 .travis.yml                                        |  29 ++-
 ChangeLog.md                                       |  33 ++-
 INSTALL                                            |  24 --
 MANIFEST.in                                        |   1 -
 README.md                                          |   3 +-
 biom/__init__.py                                   |   5 +-
 biom/cli/__init__.py                               |  31 +++
 biom/cli/installation_informer.py                  | 121 ++++++++++
 biom/cli/metadata_adder.py                         | 186 +++++++++++++++
 biom/cli/table_converter.py                        | 209 +++++++++++++++++
 biom/cli/table_head.py                             |  46 ++++
 biom/cli/table_normalizer.py                       |  74 ++++++
 biom/cli/table_subsetter.py                        | 139 +++++++++++
 biom/cli/table_summarizer.py                       | 136 +++++++++++
 biom/{commands => cli}/table_validator.py          | 107 +++++----
 biom/cli/uc_processor.py                           |  85 +++++++
 biom/cli/util.py                                   |  35 +++
 biom/commands/__init__.py                          |   0
 biom/commands/installation_informer.py             | 123 ----------
 biom/commands/metadata_adder.py                    | 165 -------------
 biom/commands/table_converter.py                   | 221 -----------------
 biom/commands/table_normalizer.py                  |  91 -------
 biom/commands/table_subsetter.py                   | 122 ----------
 biom/commands/table_summarizer.py                  | 168 -------------
 biom/interfaces/__init__.py                        |   0
 biom/interfaces/html/__init__.py                   |   0
 biom/interfaces/html/config/__init__.py            |   0
 biom/interfaces/html/config/add_metadata.py        |  99 --------
 biom/interfaces/html/config/convert.py             |  74 ------
 biom/interfaces/html/config/normalize_table.py     |  56 -----
 biom/interfaces/html/config/show_install_info.py   |  36 ---
 biom/interfaces/html/config/summarize_table.py     |  53 -----
 biom/interfaces/html/config/validate_table.py      |  55 -----
 biom/interfaces/html/input_handler.py              |  56 -----
 biom/interfaces/optparse/__init__.py               |   0
 biom/interfaces/optparse/config/__init__.py        |   0
 biom/interfaces/optparse/config/add_metadata.py    | 114 ---------
 biom/interfaces/optparse/config/convert.py         |  81 -------
 biom/interfaces/optparse/config/normalize_table.py |  84 -------
 .../optparse/config/show_install_info.py           |  39 ---
 biom/interfaces/optparse/config/subset_table.py    |  77 ------
 biom/interfaces/optparse/config/summarize_table.py |  64 -----
 biom/interfaces/optparse/config/validate_table.py  |  65 -----
 biom/interfaces/optparse/input_handler.py          |  79 -------
 biom/interfaces/optparse/output_handler.py         |  80 -------
 biom/parse.py                                      | 114 ++++++++-
 biom/table.py                                      | 261 +++++++++++++--------
 biom/util.py                                       |  26 +-
 doc/conf.py                                        |   4 +-
 doc/index.rst                                      |  88 ++-----
 scripts/biom                                       |  22 --
 scripts/serve-biom                                 |  11 -
 setup.py                                           |  40 ++--
 .../test_cli/__init__.py                           |   4 +-
 .../test_add_metadata.py}                          |  47 +---
 .../test_data/json_obs_collapsed.biom              |   0
 .../test_data/json_sample_collapsed.biom           |   0
 .../test_data/test.biom                            | Bin
 tests/test_cli/test_data/test.json                 |   1 +
 tests/test_cli/test_show_install_info.py           |  24 ++
 .../test_subset_table.py}                          |  82 +++----
 tests/test_cli/test_summarize_table.py             | 122 ++++++++++
 .../test_table_converter.py                        | 256 +++++++++-----------
 tests/test_cli/test_table_normalizer.py            |  50 ++++
 tests/test_cli/test_uc_processor.py                | 109 +++++++++
 .../test_validate_table.py}                        |   2 +-
 tests/test_commands/__init__.py                    |   0
 tests/test_commands/test_installation_informer.py  |  40 ----
 tests/test_commands/test_table_normalizer.py       |  56 -----
 tests/test_commands/test_table_summarizer.py       | 132 -----------
 tests/test_data/test.json                          |   2 +-
 tests/test_interfaces/__init__.py                  |   0
 tests/test_interfaces/test_optparse/__init__.py    |   0
 .../test_optparse/test_input_handler.py            | 118 ----------
 tests/test_parse.py                                | 143 ++++++++++-
 tests/test_table.py                                | 152 ++++++++----
 tests/test_util.py                                 |   2 +-
 77 files changed, 2224 insertions(+), 2950 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 3e59cb5..7d9cc0c 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,32 +1,31 @@
 # Modified from https://github.com/biocore/scikit-bio/
 language: python
-python:
-  - "2.7"
 env:
-  - NUMPY_VERSION=1.7
-  - NUMPY_VERSION=1.8
-  - NUMPY_VERSION=1.8 USE_H5PY=True
-  - NUMPY_VERSION=1.8 USE_CYTHON=True
-  - NUMPY_VERSION=1.7 USE_H5PY=True
+  - PYTHON_VERSION=2.7 USE_H5PY=True NOSE_ARGS="--with-doctest --with-coverage"
+  - PYTHON_VERSION=2.7 USE_CYTHON=True NOSE_ARGS="--with-doctest --with-coverage"
+  - PYTHON_VERSION=3.4 USE_H5PY=True
+  - PYTHON_VERSION=3.4 USE_CYTHON=True
+  - PYTHON_VERSION=3.5 USE_H5PY=True
+  - PYTHON_VERSION=3.5 USE_CYTHON=True
 before_install:
-  - wget http://repo.continuum.io/miniconda/Miniconda3-3.7.3-Linux-x86_64.sh -O miniconda.sh
+  - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh
   - chmod +x miniconda.sh
   - ./miniconda.sh -b
   - export PATH=/home/travis/miniconda3/bin:$PATH
-  # Update conda itself
-  - conda update --yes conda
 install:
-  - conda create --yes -n env_name python=$TRAVIS_PYTHON_VERSION pip numpy=$NUMPY_VERSION scipy nose pep8 Sphinx coverage
+  - conda create --yes -n env_name python=$PYTHON_VERSION pip click numpy scipy nose pep8 flake8 coverage future
   - if [ ${USE_CYTHON} ]; then conda install --yes -n env_name cython; fi
   - if [ ${USE_H5PY} ]; then conda install --yes -n env_name h5py>=2.2.0; fi
+  - if [ ${PYTHON_VERSION} = "2.7" ]; then conda install --yes -n env_name Sphinx=1.2.2; fi
   - source activate env_name
-  - pip install coveralls pyqi
+  - if [ ${PYTHON_VERSION} = "2.7" ]; then pip install pyqi; fi
+  - pip install coveralls
   - pip install -e . --no-deps
 script:
-  - nosetests --with-doctest --with-coverage
-  - pep8 biom setup.py
+  - nosetests ${NOSE_ARGS}
+  - flake8 biom setup.py
   - biom show-install-info
-  - make -C doc html
+  - if [ ${PYTHON_VERSION} = "2.7" ]; then make -C doc html; fi
   # we can only validate the tables if we have H5PY
   - if [ ${USE_H5PY} ]; then for table in examples/*hdf5.biom; do echo ${table}; biom validate-table -i ${table}; done; fi
   # validate JSON formatted tables
diff --git a/ChangeLog.md b/ChangeLog.md
index eebf2fe..0eeec8e 100644
--- a/ChangeLog.md
+++ b/ChangeLog.md
@@ -1,6 +1,37 @@
 BIOM-Format ChangeLog
 =====================
 
+biom 2.1.5
+----------
+
+New features and bug fixes, released on 21 October 2015.
+
+Changes:
+
+* Codebase is now Python 2/3 compatible. It is currently tested with Python
+  versions 2.7, 3.4 and 3.5.
+* `biom-serve` and the accompanying HTML interface have been removed.
+
+New Features:
+
+* `Table.head` has been added to retrieve the first few rows and/or columns
+  from a table. This can be accessed through the new ``biom head`` command.
+  See [issue #639](https://github.com/biocore/biom-format/issues/639).
+* ``biom.parse.from_uc`` has been added to support creation of ``biom.Table``
+  objects from vsearch/uclust/usearch ``.uc`` files. This can be accessed
+  through the new ``biom from-uc`` command. See
+  [issue #648](https://github.com/biocore/biom-format/issues/648).
+* Codebase now uses [click](http://click.pocoo.org) instead of
+  [pyqi](https://github.com/biocore/pyqi) for its command line interface.
+  See [issue #631](https://github.com/biocore/biom-format/issues/631).
+
+Bug fixes:
+
+* `Table.update_ids` strict check was too aggressive. See
+  [issue #633](https://github.com/biocore/biom-format/issues/633).
+* `biom --version` now prints the software version (previously the individual
+  commands did this, but not the base command).
+
 biom 2.1.4
 ----------
 
@@ -10,7 +41,7 @@ Changes:
 
 * Codebase updated to reflect pep8 1.6.x
 
-Changes:
+New features:
 
 * `Table.to_hdf5` and `Table.from_hdf5` now support custom parsers and
     formatters, see issue #608
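As a quick illustration of the new `Table.head` noted in the ChangeLog above, a minimal sketch using the bundled example_table; the n and m values here are arbitrary:

    from biom import example_table

    # head() returns a new Table holding the first n observations (rows)
    # and m samples (columns); the original table is left untouched.
    corner = example_table.head(n=2, m=2)
    print(corner)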
diff --git a/INSTALL b/INSTALL
deleted file mode 100644
index 08c4af0..0000000
--- a/INSTALL
+++ /dev/null
@@ -1,24 +0,0 @@
-INSTALL
-=======
-
-More details can be found at http://biom-format.org
-
-To build, simply run:
-
-$ python setup.py build
-
-To install into your home directory:
-
-$ python setup.py install --prefix=$HOME
-
-To install system-wide:
-
-$ python setup.py install
-
-If you have Sphinx installed, you can build the documentation locally with:
-
-$ cd doc; make html
-
-If you have nose installed, you can run the unit tests with:
-
-$ nosetests
diff --git a/MANIFEST.in b/MANIFEST.in
index 5e27d9a..7f908c9 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -5,7 +5,6 @@ include ChangeLog.md
 graft biom
 graft support_files
 graft examples
-graft scripts
 graft doc
 
 prune docs/_build
diff --git a/README.md b/README.md
index 0c347fc..6aba07f 100644
--- a/README.md
+++ b/README.md
@@ -12,5 +12,4 @@ Further details can be found at http://biom-format.org.
 Getting help
 ------------
 
-To get help with biom, you should use the [biom](http://stackoverflow.com/questions/tagged/biom) tag on StackOverflow (SO). Before posting a question, check out SO's guide on how to [ask a question](http://stackoverflow.com/questions/how-to-ask). The biom-format developers regularly monitor the `biom` SO tag.
-
+To get help with biom, you should use the [biom](http://stackoverflow.com/questions/tagged/biom) tag on StackOverflow (SO), or post to the [QIIME Forum](http://forum.qiime.org). Before posting a question, check out SO's guide on how to [ask a question](http://stackoverflow.com/questions/how-to-ask). The biom-format developers regularly monitor the `biom` SO tag.
diff --git a/biom/__init__.py b/biom/__init__.py
index 4266584..388709c 100755
--- a/biom/__init__.py
+++ b/biom/__init__.py
@@ -41,7 +41,7 @@ either in TSV, HDF5, JSON, gzip'd JSON or gzip'd TSV and parse accordingly:
 
 """
 # ----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
+# Copyright (c) 2011-2015, The BIOM Format Development Team.
 #
 # Distributed under the terms of the Modified BSD License.
 #
@@ -73,4 +73,5 @@ example_table = Table([[0, 1, 2], [3, 4, 5]], ['O1', 'O2'],
                        {'environment': 'A'}], input_is_dense=True)
 
 
-__all__ = ['Table', 'example_table', 'parse_table', 'load_table']
+__all__ = ['Table', 'example_table', 'parse_table', 'load_table',
+           '__format_version__', '__version__']
diff --git a/biom/cli/__init__.py b/biom/cli/__init__.py
new file mode 100644
index 0000000..1cbc08c
--- /dev/null
+++ b/biom/cli/__init__.py
@@ -0,0 +1,31 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2011-2015, The BIOM Format Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import division
+
+from importlib import import_module
+
+import click
+import biom
+
+
+ at click.group()
+ at click.version_option(version=biom.__version__)
+def cli():
+    pass
+
+
+import_module('biom.cli.table_summarizer')
+import_module('biom.cli.metadata_adder')
+import_module('biom.cli.table_converter')
+import_module('biom.cli.installation_informer')
+import_module('biom.cli.table_subsetter')
+import_module('biom.cli.table_normalizer')
+import_module('biom.cli.table_head')
+import_module('biom.cli.table_validator')
+import_module('biom.cli.uc_processor')
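To illustrate the registration pattern used above: each submodule decorates its entry point with @cli.command, so the import_module calls are what attach the subcommands to the group. A minimal sketch; the 'hello' command below is hypothetical:

    import click

    @click.group()
    def cli():
        pass

    # Importing a module containing this definition registers 'hello' on the group.
    @cli.command(name='hello')
    @click.option('--name', default='world')
    def hello(name):
        click.echo('hello, %s' % name)

    if __name__ == '__main__':
        cli()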
diff --git a/biom/cli/installation_informer.py b/biom/cli/installation_informer.py
new file mode 100644
index 0000000..9e3a999
--- /dev/null
+++ b/biom/cli/installation_informer.py
@@ -0,0 +1,121 @@
+# -----------------------------------------------------------------------------
+# Copyright (c) 2011-2015, The BIOM Format Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+
+from __future__ import division
+
+import sys
+
+import click
+
+from biom.cli import cli
+
+
+ at cli.command(name='show-install-info')
+def show_install_info():
+    """Provide information about the biom-format installation.
+
+    Provide information about the biom-format installation, including settings
+    pulled from the configuration file. For more details, see
+    http://biom-format.org
+
+    Example usage:
+
+    Display biom-format installation information:
+
+    $ biom show-install-info
+
+    """
+    click.echo(_show_install_info())
+
+
+def _show_install_info():
+    lines = []
+    lines.extend(_get_formatted_system_info())
+    lines.extend(_get_formatted_dependency_version_info())
+    lines.extend(_get_formatted_package_info())
+    lines.append('')
+    return '\n'.join(lines)
+
+
+def _get_formatted_system_info():
+    return _format_info(_get_system_info(), 'System information')
+
+
+def _get_formatted_dependency_version_info():
+    return _format_info(_get_dependency_version_info(), 'Dependency versions')
+
+
+def _get_formatted_package_info():
+    return _format_info(_get_package_info(), 'biom-format package information')
+
+
+def _get_system_info():
+    return (("Platform", sys.platform),
+            ("Python version", sys.version.replace('\n', ' ')),
+            ("Python executable", sys.executable))
+
+
+def _get_dependency_version_info():
+    not_installed_msg = "Not installed"
+
+    try:
+        from click import __version__ as click_lib_version
+    except ImportError:
+        click_lib_version = not_installed_msg
+
+    try:
+        from numpy import __version__ as numpy_lib_version
+    except ImportError:
+        numpy_lib_version = ("ERROR: Not installed - this is required! "
+                             "(This will also cause the BIOM library to "
+                             "not be importable.)")
+
+    try:
+        from scipy import __version__ as scipy_lib_version
+    except ImportError:
+        scipy_lib_version = not_installed_msg
+
+    try:
+        from h5py import __version__ as h5py_lib_version
+    except ImportError:
+        h5py_lib_version = ("WARNING: Not installed - this is an optional "
+                            "dependency. It is strongly recommended for "
+                            "large datasets.")
+
+    return (("click version", click_lib_version),
+            ("NumPy version", numpy_lib_version),
+            ("SciPy version", scipy_lib_version),
+            ("h5py version", h5py_lib_version))
+
+
+def _get_package_info():
+    import_error_msg = ("ERROR: Can't find the BIOM library code (or "
+                        "numpy) - is it installed and in your "
+                        "$PYTHONPATH?")
+    try:
+        from biom import __version__ as biom_lib_version
+    except ImportError:
+        biom_lib_version = import_error_msg
+
+    return (("biom-format version", biom_lib_version),)
+
+
+def _format_info(info, title):
+    max_len = _get_max_length(info)
+
+    lines = ['']
+    lines.append(title)
+    lines.append('=' * len(title))
+    for e in info:
+        lines.append("%*s:\t%s" % (max_len, e[0], e[1]))
+
+    return lines
+
+
+def _get_max_length(info):
+    return max([len(e[0]) for e in info])
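The same report can also be produced programmatically; a minimal sketch, noting that calling the private helper directly is for demonstration only:

    from biom.cli.installation_informer import _show_install_info

    # Prints platform, Python, and dependency version information,
    # equivalent to `biom show-install-info`.
    print(_show_install_info())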
diff --git a/biom/cli/metadata_adder.py b/biom/cli/metadata_adder.py
new file mode 100644
index 0000000..96e2660
--- /dev/null
+++ b/biom/cli/metadata_adder.py
@@ -0,0 +1,186 @@
+# -----------------------------------------------------------------------------
+# Copyright (c) 2011-2015, The BIOM Format Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+
+from __future__ import division
+
+import click
+
+from biom import load_table
+from biom.cli import cli
+from biom.cli.util import write_biom_table
+from biom.parse import MetadataMap
+from biom.util import HAVE_H5PY
+
+
+ at cli.command(name='add-metadata')
+ at click.option('-i', '--input-fp', required=True,
+              type=click.Path(exists=True, dir_okay=False),
+              help='The input BIOM table')
+ at click.option('-o', '--output-fp', required=True,
+              type=click.Path(exists=False, dir_okay=False),
+              help='The output BIOM table')
+ at click.option('-m', '--sample-metadata-fp', required=False,
+              type=click.Path(exists=True, dir_okay=False),
+              help='The sample metadata mapping file (will add sample '
+                   'metadata to the input BIOM table, if provided).')
+ at click.option('--observation-metadata-fp', required=False,
+              type=click.Path(exists=True, dir_okay=False),
+              help='The observation metadata mapping file (will add '
+                   'observation metadata to the input BIOM table, if '
+                   'provided).')
+ at click.option('--sc-separated', required=False, type=click.STRING,
+              help='Comma-separated list of the metadata fields to split '
+                   'on semicolons. This is useful for hierarchical data such '
+                   'as taxonomy or functional categories.')
+ at click.option('--sc-pipe-separated', required=False, type=click.STRING,
+              help='Comma-separated list of the metadata fields to split '
+                   'on semicolons and pipes ("|"). This is useful for '
+                   'hierarchical data such as functional categories with '
+                   'one-to-many mappings (e.g. x;y;z|x;y;w).')
+ at click.option('--int-fields', required=False, type=click.STRING,
+              help='Comma-separated list of the metadata fields to cast '
+                   'to integers. This is useful for integer data such as '
+                   '"DaysSinceStart".')
+ at click.option('--float-fields', required=False, type=click.STRING,
+              help='Comma-separated list of the metadata fields to cast '
+                   'to floating point numbers. This is useful for real number '
+                   'data such as "pH".')
+ at click.option('--sample-header', required=False, type=click.STRING,
+              help='Comma-separated list of the sample metadata field '
+                   'names. This is useful if a header line is not provided '
+                   'with the metadata, if you want to rename the fields, or '
+                   'if you want to include only the first n fields where n is '
+                   'the number of entries provided here.')
+ at click.option('--observation-header', required=False, type=click.STRING,
+              help='Comma-separated list of the observation metadata '
+                   'field names. This is useful if a header line is not '
+                   'provided with the metadata, if you want to rename the '
+                   'fields, or if you want to include only the first n fields '
+                   'where n is the number of entries provided here.')
+ at click.option('--output-as-json', default=not HAVE_H5PY, is_flag=True,
+              help='Write the output file in JSON format.')
+def add_metadata(input_fp, output_fp, sample_metadata_fp,
+                 observation_metadata_fp, sc_separated, sc_pipe_separated,
+                 int_fields, float_fields, sample_header, observation_header,
+                 output_as_json):
+    """Add metadata to a BIOM table.
+
+    Add sample and/or observation metadata to BIOM-formatted files. See
+    examples here: http://biom-format.org/documentation/adding_metadata.html
+
+    Example usage:
+
+    Add sample metadata to a BIOM table:
+
+    $ biom add-metadata -i otu_table.biom -o table_with_sample_metadata.biom
+      -m sample_metadata.txt
+    """
+    table = load_table(input_fp)
+    if sample_metadata_fp is not None:
+        sample_metadata_f = open(sample_metadata_fp, 'U')
+    else:
+        sample_metadata_f = None
+    if observation_metadata_fp is not None:
+        observation_metadata_f = open(observation_metadata_fp, 'U')
+    else:
+        observation_metadata_f = None
+    if sc_separated is not None:
+        sc_separated = sc_separated.split(',')
+    if sc_pipe_separated is not None:
+        sc_pipe_separated = sc_pipe_separated.split(',')
+    if int_fields is not None:
+        int_fields = int_fields.split(',')
+    if float_fields is not None:
+        float_fields = float_fields.split(',')
+    if sample_header is not None:
+        sample_header = sample_header.split(',')
+    if observation_header is not None:
+        observation_header = observation_header.split(',')
+
+    result = _add_metadata(table, sample_metadata_f, observation_metadata_f,
+                           sc_separated, sc_pipe_separated, int_fields,
+                           float_fields, sample_header, observation_header)
+
+    if output_as_json:
+        fmt = 'json'
+    else:
+        fmt = 'hdf5'
+
+    write_biom_table(result, fmt, output_fp)
+
+
+def _split_on_semicolons(x):
+    return [e.strip() for e in x.split(';')]
+
+
+def _split_on_semicolons_and_pipes(x):
+    return [[e.strip() for e in y.split(';')] for y in x.split('|')]
+
+
+def _int(x):
+    try:
+        return int(x)
+    except ValueError:
+        return x
+
+
+def _float(x):
+    try:
+        return float(x)
+    except ValueError:
+        return x
+
+
+def _add_metadata(table, sample_metadata=None, observation_metadata=None,
+                  sc_separated=None, sc_pipe_separated=None, int_fields=None,
+                  float_fields=None, sample_header=None,
+                  observation_header=None):
+
+    if sample_metadata is None and observation_metadata is None:
+        raise ValueError('Must specify sample_metadata and/or '
+                         'observation_metadata.')
+
+    # define metadata processing functions, if any
+    process_fns = {}
+    if sc_separated is not None:
+        process_fns.update(dict.fromkeys(sc_separated,
+                                         _split_on_semicolons))
+
+    if sc_pipe_separated is not None:
+        process_fns.update(dict.fromkeys(sc_pipe_separated,
+                           _split_on_semicolons_and_pipes))
+
+    if int_fields is not None:
+        process_fns.update(dict.fromkeys(int_fields, _int))
+
+    if float_fields is not None:
+        process_fns.update(dict.fromkeys(float_fields, _float))
+
+    # parse mapping files
+    if sample_metadata is not None:
+        sample_metadata = MetadataMap.from_file(sample_metadata,
+                                                process_fns=process_fns,
+                                                header=sample_header)
+
+    if observation_metadata is not None:
+        observation_metadata = MetadataMap.from_file(
+            observation_metadata,
+            process_fns=process_fns,
+            header=observation_header)
+
+    # NAUGHTY: this is modifying the input table IN PLACE!!! And then
+    # RETURNING IT! MetadataAdder is angry!
+
+    # add metadata as necessary
+    if sample_metadata:
+        table.add_metadata(sample_metadata, axis='sample')
+
+    if observation_metadata:
+        table.add_metadata(observation_metadata, axis='observation')
+
+    return table
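A minimal sketch of what add-metadata does under the hood, using an in-memory dict instead of a mapping file; the pH values are made up:

    from biom import example_table

    table = example_table.copy()

    # Keys are sample IDs present in the table; values are metadata dicts.
    sample_md = {'S1': {'pH': 6.8}, 'S2': {'pH': 7.1}, 'S3': {'pH': 7.4}}
    table.add_metadata(sample_md, axis='sample')

    print(table.metadata('S1', axis='sample'))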
diff --git a/biom/cli/table_converter.py b/biom/cli/table_converter.py
new file mode 100644
index 0000000..2506e0d
--- /dev/null
+++ b/biom/cli/table_converter.py
@@ -0,0 +1,209 @@
+# -----------------------------------------------------------------------------
+# Copyright (c) 2011-2015, The BIOM Format Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+
+from __future__ import division
+
+import click
+
+from biom import load_table
+from biom.cli import cli
+from biom.cli.util import write_biom_table
+from biom.parse import MetadataMap
+
+
+table_types = ["OTU table",
+               "Pathway table",
+               "Function table",
+               "Ortholog table",
+               "Gene table",
+               "Metabolite table",
+               "Taxon table",
+               "Table"]
+
+observation_metadata_types = {
+    'sc_separated': lambda x: [e.strip() for e in x.split(';')],
+    'naive': lambda x: x
+}
+observation_metadata_types['taxonomy'] = \
+    observation_metadata_types['sc_separated']
+
+observation_metadata_formatters = {
+    'sc_separated': lambda x: '; '.join(x),
+    'naive': lambda x: x
+}
+
+
+ at cli.command(name='convert')
+ at click.option('-i', '--input-fp', required=True,
+              type=click.Path(exists=True, dir_okay=False),
+              help='The input BIOM table')
+ at click.option('-o', '--output-fp', required=True,
+              type=click.Path(exists=False, dir_okay=False),
+              help='The output BIOM table')
+ at click.option('-m', '--sample-metadata-fp', required=False,
+              type=click.Path(exists=True, dir_okay=False),
+              help='The sample metadata mapping file (will add sample '
+                   'metadata to the input BIOM table, if provided).')
+ at click.option('--observation-metadata-fp', required=False,
+              type=click.Path(exists=True, dir_okay=False),
+              help='The observation metadata mapping file (will add '
+                   'observation metadata to the input BIOM table, if '
+                   'provided).')
+ at click.option('--to-json', default=False, is_flag=True,
+              help='Output as JSON-formatted table.')
+ at click.option('--to-hdf5', default=False, is_flag=True,
+              help='Output as HDF5-formatted table.')
+ at click.option('--to-tsv', default=False, is_flag=True,
+              help='Output as TSV-formatted (classic) table.')
+ at click.option('--collapsed-samples', default=False, is_flag=True,
+              help='If --to_hdf5 is passed and the original table is a '
+                   'BIOM table with collapsed samples, this will '
+                   'update the sample metadata of the table to '
+                   'the supported HDF5 collapsed format.')
+ at click.option('--collapsed-observations', default=False, is_flag=True,
+              help='If --to_hdf5 is passed and the original table is a '
+                   'BIOM table with collapsed observations, this will '
+                   'update the observation metadata of the table '
+                   'to the supported HDF5 collapsed format.')
+ at click.option('--header-key', required=False, type=click.STRING,
+              help='The observation metadata to include from the input '
+                   'BIOM table file when creating a tsv table file. '
+                   'By default no observation metadata will be included.')
+ at click.option('--output-metadata-id', required=False, type=click.STRING,
+              help='The name to be given to the observation metadata '
+                   'column when creating a tsv table file if the column '
+                   'should be renamed.')
+ at click.option('--table-type', required=False,
+              type=click.Choice(table_types),
+              help='The type of the table.')
+ at click.option('--process-obs-metadata', required=False,
+              type=click.Choice(
+                observation_metadata_types),
+              help='Process metadata associated with observations when '
+              'converting from a classic table.')
+ at click.option('--tsv-metadata-formatter', required=False,
+              default='sc_separated',
+              type=click.Choice(
+                observation_metadata_formatters),
+              help='Method for formatting the observation metadata.')
+def convert(input_fp, output_fp, sample_metadata_fp, observation_metadata_fp,
+            to_json, to_hdf5, to_tsv, collapsed_samples,
+            collapsed_observations, header_key, output_metadata_id, table_type,
+            process_obs_metadata, tsv_metadata_formatter):
+    """Convert to/from the BIOM table format.
+
+    Convert between BIOM table formats. See examples here:
+    http://biom-format.org/documentation/biom_conversion.html
+
+    Example usage:
+
+    Convert a "classic" BIOM file (tab-separated text) to an HDF5 BIOM
+    formatted OTU table:
+
+    $ biom convert -i table.txt -o table.biom --to-hdf5
+    """
+    if sum([to_tsv, to_hdf5, to_json]) > 1:
+        raise ValueError("--to-tsv, --to-json, and --to-hdf5 are mutually "
+                         "exclusive. You can only pass one of these options.")
+
+    table = load_table(input_fp)
+    if sample_metadata_fp is not None:
+        with open(sample_metadata_fp, 'U') as f:
+            sample_metadata_f = MetadataMap.from_file(f)
+    else:
+        sample_metadata_f = None
+    if observation_metadata_fp is not None:
+        with open(observation_metadata_fp, 'U') as f:
+            observation_metadata_f = MetadataMap.from_file(f)
+    else:
+        observation_metadata_f = None
+
+    _convert(table, output_fp, sample_metadata_f, observation_metadata_f,
+             to_json, to_hdf5, to_tsv, collapsed_samples,
+             collapsed_observations, header_key, output_metadata_id,
+             table_type, process_obs_metadata, tsv_metadata_formatter)
+
+
+def _convert(table, output_filepath, sample_metadata=None,
+             observation_metadata=None, to_json=False, to_hdf5=False,
+             to_tsv=False, collapsed_samples=False,
+             collapsed_observations=False, header_key=None,
+             output_metadata_id=None, table_type=None,
+             process_obs_metadata=None, tsv_metadata_formatter='sc_separated'):
+
+    if sum([to_tsv, to_hdf5, to_json]) == 0:
+        raise ValueError("Must specify an output format")
+    elif sum([to_tsv, to_hdf5, to_json]) > 1:
+        raise ValueError("Can only specify a single output format")
+
+    if table_type is None:
+        if table.type in [None, "None"]:
+            table.type = "Table"
+        else:
+            pass
+    else:
+        table.type = table_type
+
+    if tsv_metadata_formatter is not None:
+        obs_md_fmt_f = observation_metadata_formatters[tsv_metadata_formatter]
+
+    if sample_metadata is not None:
+        table.add_metadata(sample_metadata)
+
+    # if the user does not specify a name for the output metadata column,
+    # set it to the same as the header key
+    output_metadata_id = output_metadata_id or header_key
+
+    if process_obs_metadata is not None and not to_tsv:
+        if table.metadata(axis='observation') is None:
+            raise ValueError("Observation metadata processing requested "
+                             "but it doesn't appear that there is any "
+                             "metadata to operate on!")
+
+        # and if this came in as TSV, then we expect only a single type of
+        # metadata
+        md_key = list(table.metadata(axis='observation')[0].keys())[0]
+
+        process_f = observation_metadata_types[process_obs_metadata]
+        it = zip(table.ids(axis='observation'),
+                 table.metadata(axis='observation'))
+        new_md = {id_: {md_key: process_f(md[md_key])} for id_, md in it}
+
+        if observation_metadata:
+            for k, v in observation_metadata.items():
+                new_md[k].update(v)
+        table.add_metadata(new_md, 'observation')
+
+    if to_tsv:
+        result = table.to_tsv(header_key=header_key,
+                              header_value=output_metadata_id,
+                              metadata_formatter=obs_md_fmt_f)
+        with open(output_filepath, 'w') as f:
+            f.write(result)
+        return
+    elif to_json:
+        fmt = 'json'
+        result = table
+    elif to_hdf5:
+        fmt = 'hdf5'
+        result = table
+        if collapsed_observations:
+            metadata = [{'collapsed_ids': sorted(md.keys())}
+                        for md in result.metadata(axis='observation')]
+            result._observation_metadata = metadata
+        if collapsed_samples:
+            metadata = [{'collapsed_ids': sorted(md.keys())}
+                        for md in result.metadata()]
+            result._sample_metadata = metadata
+        if collapsed_observations or collapsed_samples:
+            # We have changed the metadata, it is safer to make sure that
+            # it is correct
+            result._cast_metadata()
+    write_biom_table(result, fmt, output_filepath)
+
+    return
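A rough equivalent of the TSV and JSON conversions in library code rather than via the CLI; 'table.biom', 'table.txt' and 'table.json' are placeholder paths and the generated_by string is arbitrary:

    from biom import load_table

    table = load_table('table.biom')   # accepts HDF5, JSON, or classic TSV input

    # Classic (tab-separated) output, as with `biom convert --to-tsv`
    with open('table.txt', 'w') as out:
        out.write(table.to_tsv())

    # JSON output, as with `biom convert --to-json`
    with open('table.json', 'w') as out:
        out.write(table.to_json('example generated_by string'))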
diff --git a/biom/cli/table_head.py b/biom/cli/table_head.py
new file mode 100644
index 0000000..9d924c7
--- /dev/null
+++ b/biom/cli/table_head.py
@@ -0,0 +1,46 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2011-2013, The BIOM Format Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import division
+
+import click
+
+from biom import load_table
+from biom.cli import cli
+
+
+ at cli.command()
+ at click.option('-i', '--input-fp', required=True,
+              type=click.Path(exists=True, dir_okay=False),
+              help='The input BIOM table')
+ at click.option('-o', '--output-fp', default=None,
+              type=click.Path(writable=True),
+              help='An output file-path', required=False)
+ at click.option('-n', '--n-obs', default=5, type=int,
+              help="The number of observations to show",
+              required=False)
+ at click.option('-m', '--n-samp', default=5, type=int,
+              help="The number of samples to show",
+              required=False)
+def head(input_fp, output_fp, n_obs, n_samp):
+    """Dump the first bit of a table.
+
+    Example usage:
+
+    Print out the upper left corner of a BIOM table to standard out:
+
+    $ biom head -i table.biom
+
+    """
+    table = load_table(input_fp).head(n=n_obs, m=n_samp)
+
+    if output_fp is None:
+        click.echo(str(table))
+    else:
+        with open(output_fp, 'w') as fp:
+            fp.write(str(table))
diff --git a/biom/cli/table_normalizer.py b/biom/cli/table_normalizer.py
new file mode 100755
index 0000000..cad6ebf
--- /dev/null
+++ b/biom/cli/table_normalizer.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+
+# ----------------------------------------------------------------------------
+# Copyright (c) 2011-2013, The BIOM Format Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import division
+
+import click
+
+from biom import load_table
+from biom.cli import cli
+from biom.cli.util import write_biom_table
+from biom.util import HAVE_H5PY
+
+
+ at cli.command(name='normalize-table')
+ at click.option('-i', '--input-fp', required=True,
+              type=click.Path(exists=True, dir_okay=False),
+              help='The input BIOM table')
+ at click.option('-o', '--output-fp', default=None,
+              type=click.Path(writable=True),
+              help='An output file-path')
+ at click.option('-r', '--relative-abund', default=False, is_flag=True,
+              help='convert table to relative abundance',
+              required=False)
+ at click.option('-p', '--presence-absence', default=False, is_flag=True,
+              help='convert table to presence/absence',
+              required=False)
+ at click.option('-a', '--axis', default='sample',
+              type=click.Choice(['sample', 'observation']),
+              help='The axis to normalize over')
+def normalize_table(input_fp, output_fp, relative_abund, presence_absence,
+                    axis):
+    """Normalize a BIOM table.
+
+    Normalize the values of a BIOM table through various methods. Relative
+    abundance will take the relative abundance of each observation in terms of
+    samples or observations. Presence/absence will convert observations to
+    1's and 0's based on presence of the observation.
+
+    Example usage:
+
+    Normalizing a BIOM table to relative abundance:
+
+    $ biom normalize-table -i table.biom -r -o normalized_table.biom
+
+    Converting a BIOM table to a presence/absence table:
+
+    $ biom normalize-table -i table.biom -p -o converted_table.biom
+    """
+    table = load_table(input_fp)
+    result = _normalize_table(table, relative_abund, presence_absence, axis)
+
+    write_biom_table(result, 'hdf5' if HAVE_H5PY else 'json', output_fp)
+
+
+def _normalize_table(table, relative_abund=False, presence_absence=False,
+                     axis='sample'):
+    if relative_abund is False and presence_absence is False:
+        raise ValueError("Must specifiy a normalization type")
+    elif relative_abund is True and presence_absence is True:
+        raise ValueError("Must specify only one normalization type")
+
+    if relative_abund:
+        table.norm(axis=axis)
+    else:
+        table.pa()
+
+    return table
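A minimal sketch of the two normalization modes on the bundled example_table; both methods operate in place by default:

    from biom import example_table

    rel = example_table.copy()
    rel.norm(axis='sample')   # each sample now sums to 1.0

    pa = example_table.copy()
    pa.pa()                   # values become presence (1) / absence (0)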
diff --git a/biom/cli/table_subsetter.py b/biom/cli/table_subsetter.py
new file mode 100644
index 0000000..fa056f2
--- /dev/null
+++ b/biom/cli/table_subsetter.py
@@ -0,0 +1,139 @@
+# -----------------------------------------------------------------------------
+# Copyright (c) 2011-2015, The BIOM Format Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+
+from __future__ import division
+
+import click
+
+from biom.cli import cli
+from biom.parse import (get_axis_indices, direct_slice_data, direct_parse_key,
+                        generatedby)
+from biom.table import Table
+from biom.util import biom_open, HAVE_H5PY
+
+
+ at cli.command(name='subset-table')
+ at click.option('-i', '--input-hdf5-fp', default=None,
+              type=click.Path(exists=True, dir_okay=False),
+              help='the input hdf5 BIOM table filepath to subset')
+ at click.option('-j', '--input-json-fp', default=None,
+              type=click.Path(exists=True, dir_okay=False),
+              help='the input json BIOM table filepath to subset')
+ at click.option('-a', '--axis', required=True,
+              type=click.Choice(['sample', 'observation']),
+              help='the axis to subset over, either sample or observation')
+ at click.option('-s', '--ids', required=True,
+              type=click.Path(exists=True, dir_okay=False),
+              help='a file containing a single column of IDs to retain '
+                   '(either sample IDs or observation IDs, depending on the '
+                   'axis)')
+ at click.option('-o', '--output-fp', required=True,
+              type=click.Path(writable=True, dir_okay=False),
+              help='the output BIOM table filepath')
+def subset_table(input_hdf5_fp, input_json_fp, axis, ids, output_fp):
+    """Subset a BIOM table.
+
+    Subset a BIOM table, over either observations or samples, without fully
+    parsing it. This command is intended to assist in working with very large
+    tables when tight on memory, or as a lightweight way to subset a full
+    table. Currently, it is possible to produce tables with rows or columns
+    (observations or samples) that are fully zeroed.
+
+    Example usage:
+
+    Choose a subset of the observations in table.biom (JSON) and write them to
+    subset.biom:
+
+    $ biom subset-table -j table.biom -a observation -s observation_ids.txt \
+           -o subset.biom
+
+    Choose a subset of the observations in table.biom (HDF5) and write them to
+    subset.biom:
+
+    $ biom subset-table -i table.biom -a observation -s observation_ids.txt \
+           -o subset.biom
+
+    """
+    if input_json_fp is not None:
+        with open(input_json_fp, 'U') as f:
+            input_json_fp = f.read()
+
+    with open(ids, 'U') as f:
+        ids = [line.strip() for line in f]
+
+    table, format_ = _subset_table(input_hdf5_fp, input_json_fp, axis, ids)
+
+    if format_ == 'json':
+        with open(output_fp, 'w') as f:
+            for line in table:
+                f.write(line)
+                f.write('\n')
+    else:
+        if HAVE_H5PY:
+            import h5py
+        else:
+            # This should never be raised here
+            raise ImportError("h5py is not available, cannot write HDF5!")
+
+        with h5py.File(output_fp, 'w') as f:
+            table.to_hdf5(f, generatedby())
+
+
+def _subset_table(hdf5_biom, json_table_str, axis, ids):
+    if axis not in ['sample', 'observation']:
+        raise ValueError("Invalid axis '%s'. Must be either 'sample' or "
+                         "'observation'." % axis)
+
+    if hdf5_biom is None and json_table_str is None:
+        raise ValueError("Must specify an input table")
+    elif hdf5_biom is not None and json_table_str is not None:
+        raise ValueError("Can only specify one input table")
+
+    if json_table_str is not None:
+        idxs, new_axis_md = get_axis_indices(json_table_str, ids, axis)
+        new_data = direct_slice_data(json_table_str, idxs, axis)
+
+        # multiple walks over the string. bad form, but easy right now
+        # ...should add a yield_and_ignore parser or something.
+        def subset_generator():
+            yield "{"
+            yield direct_parse_key(json_table_str, "id")
+            yield ","
+            yield direct_parse_key(json_table_str, "format")
+            yield ","
+            yield direct_parse_key(json_table_str, "format_url")
+            yield ","
+            yield direct_parse_key(json_table_str, "type")
+            yield ","
+            yield direct_parse_key(json_table_str, "generated_by")
+            yield ","
+            yield direct_parse_key(json_table_str, "date")
+            yield ","
+            yield direct_parse_key(json_table_str, "matrix_type")
+            yield ","
+            yield direct_parse_key(json_table_str, "matrix_element_type")
+            yield ","
+            yield new_data
+            yield ","
+            yield new_axis_md
+            yield ","
+
+            if axis == "observation":
+                yield direct_parse_key(json_table_str, "columns")
+            else:
+                yield direct_parse_key(json_table_str, "rows")
+            yield "}"
+
+        format_ = 'json'
+        table = subset_generator()
+    else:
+        with biom_open(hdf5_biom) as f:
+            table = Table.from_hdf5(f, ids=ids, axis=axis)
+        format_ = 'hdf5'
+
+    return table, format_
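For comparison, a minimal sketch of subsetting a fully parsed table in memory with Table.filter; unlike subset-table, this loads the entire table first, and the set of IDs kept here is arbitrary:

    from biom import example_table

    keep = {'S1', 'S3'}
    subset = example_table.filter(keep, axis='sample', inplace=False)
    print(subset.ids())   # only samples S1 and S3 remain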
diff --git a/biom/cli/table_summarizer.py b/biom/cli/table_summarizer.py
new file mode 100644
index 0000000..77a0778
--- /dev/null
+++ b/biom/cli/table_summarizer.py
@@ -0,0 +1,136 @@
+# -----------------------------------------------------------------------------
+# Copyright (c) 2011-2015, The BIOM Format Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+
+from __future__ import division
+
+from operator import itemgetter
+
+import click
+from numpy import std
+
+from biom import load_table
+from biom.cli import cli
+from biom.util import compute_counts_per_sample_stats
+
+
+ at cli.command(name='summarize-table')
+ at click.option('-i', '--input-fp', required=True,
+              type=click.Path(exists=True, dir_okay=False),
+              help='The input BIOM table')
+ at click.option('-o', '--output-fp', default=None,
+              type=click.Path(writable=True, dir_okay=False),
+              help='An output file-path')
+ at click.option('--qualitative', default=False, is_flag=True,
+              help="Present counts as number of unique observation ids per"
+                   " sample, rather than counts of observations per sample.")
+ at click.option('--observations', default=False, is_flag=True,
+              help="Summarize over observations")
+def summarize_table(input_fp, output_fp, qualitative, observations):
+    """Summarize sample or observation data in a BIOM table.
+
+    Provides details on the observation counts per sample, including summary
+    statistics, as well as metadata categories associated with samples and
+    observations.
+
+    Example usage:
+
+    Write a summary of table.biom to table_summary.txt:
+
+    $ biom summarize-table -i table.biom -o table_summary.txt
+
+    """
+    table = load_table(input_fp)
+    result = _summarize_table(table, qualitative, observations)
+    if output_fp:
+        with open(output_fp, 'w') as fh:
+            fh.write(result)
+    else:
+        click.echo(result)
+
+
+def _summarize_table(table, qualitative=False, observations=False):
+    lines = []
+
+    if observations:
+        table = table.transpose()
+
+    min_counts, max_counts, median_counts, mean_counts, counts_per_samp =\
+        compute_counts_per_sample_stats(table, qualitative)
+    num_observations = len(table.ids(axis='observation'))
+
+    counts_per_sample_values = list(counts_per_samp.values())
+
+    if table.metadata() is None:
+        sample_md_keys = ["None provided"]
+    else:
+        sample_md_keys = table.metadata()[0].keys()
+
+    if table.metadata(axis='observation') is None:
+        observation_md_keys = ["None provided"]
+    else:
+        observation_md_keys = table.metadata(axis='observation')[0].keys()
+
+    num_samples = len(table.ids())
+
+    if observations:
+        # as this is a transpose of the original table...
+        lines.append('Num samples: %d' % num_observations)
+        lines.append('Num observations: %d' % num_samples)
+    else:
+        lines.append('Num samples: %d' % num_samples)
+        lines.append('Num observations: %d' % num_observations)
+
+    if not qualitative:
+        total_count = sum(counts_per_sample_values)
+        lines.append('Total count: %d' % total_count)
+        lines.append('Table density (fraction of non-zero values): %1.3f' %
+                     table.get_table_density())
+
+    lines.append('')
+
+    if qualitative:
+        if observations:
+            lines.append('Sample/observations summary:')
+        else:
+            lines.append('Observations/sample summary:')
+    else:
+        lines.append('Counts/sample summary:')
+
+    lines.append(' Min: %r' % min_counts)
+    lines.append(' Max: %r' % max_counts)
+    lines.append(' Median: %1.3f' % median_counts)
+    lines.append(' Mean: %1.3f' % mean_counts)
+    lines.append(' Std. dev.: %1.3f' % std(counts_per_sample_values))
+
+    if observations:
+        # since this is a transpose...
+        lines.append(
+            ' Sample Metadata Categories: %s' %
+            '; '.join(observation_md_keys))
+        lines.append(
+            ' Observation Metadata Categories: %s' %
+            '; '.join(sample_md_keys))
+        lines.append('')
+    else:
+        lines.append(
+            ' Sample Metadata Categories: %s' %
+            '; '.join(sample_md_keys))
+        lines.append(
+            ' Observation Metadata Categories: %s' %
+            '; '.join(observation_md_keys))
+        lines.append('')
+
+    if qualitative:
+        lines.append('Observations/sample detail:')
+    else:
+        lines.append('Counts/sample detail:')
+
+    for k, v in sorted(counts_per_samp.items(), key=itemgetter(1)):
+        lines.append('%s: %r' % (k, v))
+
+    return "\n".join(lines)
diff --git a/biom/commands/table_validator.py b/biom/cli/table_validator.py
similarity index 89%
rename from biom/commands/table_validator.py
rename to biom/cli/table_validator.py
index 0e39628..ee90005 100644
--- a/biom/commands/table_validator.py
+++ b/biom/cli/table_validator.py
@@ -15,58 +15,63 @@ from datetime import datetime
 from operator import and_
 from functools import reduce
 
+import click
 import numpy as np
-from pyqi.core.command import (Command, CommandIn, CommandOut,
-                               ParameterCollection)
 
+from biom.cli import cli
 from biom.util import HAVE_H5PY, biom_open, is_hdf5_file
 
 
-__author__ = "Daniel McDonald"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Daniel McDonald", "Jose Clemente", "Greg Caporaso",
-               "Jai Ram Rideout", "Justin Kuczynski", "Andreas Wilke",
-               "Tobias Paczian", "Rob Knight", "Folker Meyer", "Sue Huse",
-               "Jorge Cañardo Alastuey"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__author__ = "Daniel McDonald"
-__email__ = "daniel.mcdonald at colorado.edu"
-
-
-class TableValidator(Command):
-    BriefDescription = "Validate a BIOM-formatted file"
-    LongDescription = ("Test a file for adherence to the Biological "
-                       "Observation Matrix (BIOM) format specification. This "
-                       "specification is defined at http://biom-format.org")
-
-    CommandIns = ParameterCollection([
-        CommandIn(Name='table', DataType=object,
-                  Description='the input BIOM JSON object (e.g., the output '
-                  'of json.load)', Required=True),
-        CommandIn(Name='format_version', DataType=str,
-                  Description='the specific format version to validate '
-                  'against', Required=False, Default=None),
-        CommandIn(Name='detailed_report', DataType=bool,
-                  Description='include more details in the output report',
-                  Required=False, Default=False)
-    ])
-
-    CommandOuts = ParameterCollection([
-        CommandOut(Name='valid_table',
-                   Description='Is the table valid?',
-                   DataType=bool),
-        CommandOut(Name='report_lines',
-                   Description='Detailed report',
-                   DataType=list)
-    ])
+ at cli.command(name='validate-table')
+ at click.option('-i', '--input-fp', required=True,
+              type=click.Path(exists=True, dir_okay=False),
+              help='The input filepath to validate against the BIOM format'
+                   ' specification')
+ at click.option('-f', '--format-version', default=None,
+              help='The specific format version to validate against')
+ at click.option('--detailed-report', is_flag=True, default=False,
+              help='Include more details in the output report')
+def validate_table(input_fp, format_version, detailed_report):
+    """Validate a BIOM-formatted file.
+
+    Test a file for adherence to the Biological Observation Matrix (BIOM)
+    format specification. This specification is defined at
+    http://biom-format.org
+
+    Example usage:
+
+    Validate the contents of table.biom for adherence to the BIOM format
+    specification
+
+    $ biom validate-table -i table.biom
+
+    """
+    valid, report = _validate_table(input_fp, format_version, detailed_report)
+    click.echo("\n".join(report))
+    if valid:
+        # apparently silence is too quiet to be golden.
+        click.echo("The input file is a valid BIOM-formatted file.")
+        sys.exit(0)
+    else:
+        click.echo("The input file is not a valid BIOM-formatted file.")
+        sys.exit(1)
+
+
+def _validate_table(input_fp, format_version=None, detailed_report=False):
+    result = TableValidator()(table=input_fp, format_version=format_version,
+                              detailed_report=detailed_report)
+    return result['valid_table'], result['report_lines']
+
+
+# Refactor in the future. Also need to address #664
+class TableValidator(object):
 
     FormatURL = "http://biom-format.org"
     TableTypes = set(['otu table', 'pathway table', 'function table',
                       'ortholog table', 'gene table', 'metabolite table',
                       'taxon table'])
     MatrixTypes = set(['sparse', 'dense'])
-    ElementTypes = {'int': int, 'str': str, 'float': float, 'unicode': unicode}
+    ElementTypes = {'int': int, 'str': str, 'float': float, 'unicode': str}
     HDF5FormatVersions = set([(2, 0), (2, 0, 0), (2, 1), (2, 1, 0)])
 
     def run(self, **kwargs):
@@ -86,8 +91,6 @@ class TableValidator(Command):
                 raise ValueError("Unrecognized format version: %s" %
                                  kwargs['format_version'])
 
-        # this is not pyqi-appriopriate, but how we parse this thing is
-        # dependent on runtime options :(
         with biom_open(kwargs['table']) as f:
             if is_json:
                 kwargs['table'] = json.load(f)
@@ -105,6 +108,10 @@ class TableValidator(Command):
                 raise IOError("h5py is not installed, can only validate JSON "
                               "tables")
 
+    def __call__(self, table, format_version=None, detailed_report=False):
+        return self.run(table=table, format_version=format_version,
+                        detailed_report=detailed_report)
+
     def _validate_hdf5(self, **kwargs):
         table = kwargs['table']
 
@@ -322,7 +329,10 @@ class TableValidator(Command):
 
     def _json_or_hdf5_get(self, table, key):
         if hasattr(table, 'attrs'):
-            return table.attrs.get(key, None)
+            item = table.attrs.get(key, None)
+            if item is not None and isinstance(item, bytes):
+                item = item.decode('utf8')
+            return item
         else:
             return table.get(key, None)
 
@@ -334,11 +344,11 @@ class TableValidator(Command):
 
     def _is_int(self, x):
         """Return True if x is an int"""
-        return isinstance(x, int)
+        return isinstance(x, (int, np.int64))
 
     def _valid_nnz(self, table):
         """Check if nnz seems correct"""
-        if not isinstance(table.attrs['nnz'], int):
+        if not self._is_int(table.attrs['nnz']):
             return "nnz is not an integer!"
         if table.attrs['nnz'] < 0:
             return "nnz is negative!"
@@ -382,6 +392,9 @@ class TableValidator(Command):
                        "%Y-%m-%dT%H:%M",
                        "%Y-%m-%dT%H:%M:%S",
                        "%Y-%m-%dT%H:%M:%S.%f"]
+        if isinstance(val, bytes):
+            val = val.decode('utf8')
+
         valid_time = False
         for fmt in valid_times:
             try:
@@ -560,5 +573,3 @@ class TableValidator(Command):
             return self._valid_dense_data(table_json)
         else:
             return "Unknown matrix type"
-
-CommandConstructor = TableValidator
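A rough sketch of the refactored validator's programmatic use, mirroring _validate_table above; 'table.biom' is a placeholder path:

    from biom.cli.table_validator import TableValidator

    result = TableValidator()(table='table.biom', detailed_report=True)
    print(result['valid_table'])              # True if the file passes validation
    print('\n'.join(result['report_lines']))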
diff --git a/biom/cli/uc_processor.py b/biom/cli/uc_processor.py
new file mode 100644
index 0000000..9534d73
--- /dev/null
+++ b/biom/cli/uc_processor.py
@@ -0,0 +1,85 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2011-2013, The BIOM Format Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import division
+
+import click
+
+from biom.cli import cli
+from biom.cli.util import write_biom_table
+from biom.parse import parse_uc
+from biom.exception import TableException
+
+
+@cli.command('from-uc')
+@click.option('-i', '--input-fp', required=True,
+              type=click.Path(exists=True, dir_okay=False),
+              help='The input uc filepath.')
+@click.option('-o', '--output-fp', default=None,
+              type=click.Path(writable=True),
+              help='The output BIOM filepath', required=False)
+@click.option('--rep-set-fp', type=click.Path(exists=True, dir_okay=False),
+              help="Fasta file containing representative sequences, "
+                   "where sequences are labeled with OTU identifiers, and "
+                   "description fields contain original sequence identifiers. "
+                   "This output is created, for example, by vsearch with the "
+                   "--relabel_sha1 --relabel_keep options.",
+              required=False)
+def from_uc(input_fp, output_fp, rep_set_fp):
+    """Create a BIOM table from a vsearch/uclust/usearch .uc file.
+
+    Example usage:
+
+    Simple BIOM creation:
+
+    $ biom from-uc -i in.uc -o out.biom
+
+    BIOM creation with OTU re-naming:
+
+    $ biom from-uc -i in.uc -o out.biom --rep-set-fp rep-set.fna
+
+    """
+    input_f = open(input_fp, 'U')
+    if rep_set_fp is not None:
+        rep_set_f = open(rep_set_fp, 'U')
+    else:
+        rep_set_f = None
+    table = _from_uc(input_f, rep_set_f)
+    write_biom_table(table, 'hdf5', output_fp)
+
+
+def _id_map_from_fasta(fasta_lines):
+    result = {}
+    for line in fasta_lines:
+        if line.startswith('>'):
+            try:
+                obs_id, seq_id = line.split()[:2]
+            except ValueError:
+                raise ValueError('Sequence identifiers in fasta file '
+                                 'must contain at least two space-'
+                                 'separated fields.')
+            result[seq_id] = obs_id[1:]
+        else:
+            pass
+    return result
+
+
+def _from_uc(input_f, rep_set_f=None):
+    table = parse_uc(input_f)
+
+    if rep_set_f is not None:
+        obs_id_map = _id_map_from_fasta(rep_set_f)
+        try:
+            table.update_ids(obs_id_map, axis='observation', strict=True,
+                             inplace=True)
+        except TableException:
+            raise ValueError('Not all sequence identifiers in the input uc '
+                             'file are present in description fields in the '
+                             'representative sequence fasta file.')
+
+    return table
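
For reference, the helpers above can also be used directly from Python; a small usage sketch, with hypothetical file names 'clusters.uc' and 'rep-set.fna':

    from biom.cli.uc_processor import _from_uc

    with open('clusters.uc') as uc_f, open('rep-set.fna') as rep_f:
        # Parse the uc file and relabel observations with the OTU ids taken
        # from the fasta description fields, as from-uc does on the CLI.
        table = _from_uc(uc_f, rep_f)

    print(table.shape)  # (number of observations, number of samples)
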
diff --git a/biom/cli/util.py b/biom/cli/util.py
new file mode 100644
index 0000000..8b5c972
--- /dev/null
+++ b/biom/cli/util.py
@@ -0,0 +1,35 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2011-2015, The BIOM Format Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+from __future__ import division
+
+import biom.util
+import biom.parse
+
+
+def write_biom_table(table, fmt, filepath):
+    """Write table in specified format to filepath"""
+
+    if fmt not in ['hdf5', 'json', 'tsv']:
+        raise ValueError("Unknown file format")
+
+    if fmt == 'hdf5' and not biom.util.HAVE_H5PY:
+        fmt = 'json'
+
+    if fmt == 'json':
+        with open(filepath, 'w') as f:
+            f.write(table.to_json(biom.parse.generatedby()))
+    elif fmt == 'tsv':
+        with open(filepath, 'w') as f:
+            f.write(table)
+            f.write('\n')
+    else:
+        import h5py
+
+        with h5py.File(filepath, 'w') as f:
+            table.to_hdf5(f, biom.parse.generatedby())
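
A usage sketch for write_biom_table with hypothetical paths; note that 'hdf5' silently falls back to JSON when h5py is unavailable, and that the 'tsv' branch writes whatever it is handed, so callers pass an already-serialized string rather than a Table:

    from biom import load_table
    from biom.cli.util import write_biom_table

    table = load_table('input.biom')                    # hypothetical input
    write_biom_table(table, 'hdf5', 'out.biom')         # JSON fallback without h5py
    write_biom_table(table.to_tsv(), 'tsv', 'out.txt')  # pass the TSV string, not the Table
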
diff --git a/biom/commands/__init__.py b/biom/commands/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/biom/commands/installation_informer.py b/biom/commands/installation_informer.py
deleted file mode 100644
index 11b126f..0000000
--- a/biom/commands/installation_informer.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import division
-from sys import platform, version as python_version, executable
-from pyqi.core.command import Command, CommandOut, ParameterCollection
-
-__author__ = "Greg Caporaso"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Daniel McDonald", "Jose Clemente", "Greg Caporaso",
-               "Jai Ram Rideout", "Justin Kuczynski", "Andreas Wilke",
-               "Tobias Paczian", "Rob Knight", "Folker Meyer", "Sue Huse"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Greg Caporaso"
-__email__ = "gregcaporaso at gmail.com"
-
-
-class InstallationInformer(Command):
-    BriefDescription = ("Provide information about the biom-format "
-                        "installation")
-    LongDescription = ("Provide information about the biom-format "
-                       "installation, including settings pulled from the "
-                       "configuration file. For more details, see "
-                       "http://biom-format.org")
-    CommandIns = ParameterCollection([])
-    CommandOuts = ParameterCollection([
-        CommandOut(Name='install_info_lines',
-                   DataType='str',
-                   Description='Installation info')
-    ])
-
-    def run(self, **kwargs):
-        lines = []
-
-        lines.extend(self.get_formatted_system_info())
-        lines.extend(self.get_formatted_dependency_version_info())
-        lines.extend(self.get_formatted_package_info())
-        lines.append('')
-
-        return {'install_info_lines': lines}
-
-    def get_formatted_system_info(self):
-        return self._format_info(self.get_system_info(), 'System information')
-
-    def get_formatted_dependency_version_info(self):
-        return self._format_info(self.get_dependency_version_info(),
-                                 'Dependency versions')
-
-    def get_formatted_package_info(self):
-        return self._format_info(self.get_package_info(),
-                                 'biom-format package information')
-
-    def get_system_info(self):
-        return (("Platform", platform),
-                ("Python/GCC version", python_version.replace('\n', ' ')),
-                ("Python executable", executable))
-
-    def get_dependency_version_info(self):
-        not_installed_msg = "Not installed"
-
-        try:
-            from pyqi import __version__ as pyqi_lib_version
-        except ImportError:
-            pyqi_lib_version = not_installed_msg
-
-        try:
-            from numpy import __version__ as numpy_lib_version
-        except ImportError:
-            numpy_lib_version = ("ERROR: Not installed - this is required! "
-                                 "(This will also cause the BIOM library to "
-                                 "not be importable.)")
-
-        try:
-            from scipy import __version__ as scipy_lib_version
-        except ImportError:
-            scipy_lib_version = not_installed_msg
-
-        try:
-            from h5py import __version__ as h5py_lib_version
-        except ImportError:
-            h5py_lib_version = ("WARNING: Not installed - this is an optional "
-                                "dependency. It is strongly recommended for "
-                                "large datasets.")
-
-        return (("pyqi version", pyqi_lib_version),
-                ("NumPy version", numpy_lib_version),
-                ("SciPy version", scipy_lib_version),
-                ("h5py version", h5py_lib_version))
-
-    def get_package_info(self):
-        import_error_msg = ("ERROR: Can't find the BIOM library code (or "
-                            "numpy) - is it installed and in your "
-                            "$PYTHONPATH?")
-        try:
-            from biom import __version__ as biom_lib_version
-        except ImportError:
-            biom_lib_version = import_error_msg
-
-        return (("biom-format version", biom_lib_version),)
-
-    def _format_info(self, info, title):
-        max_len = self._get_max_length(info)
-
-        lines = ['']
-        lines.append(title)
-        lines.append('=' * len(title))
-        for e in info:
-            lines.append("%*s:\t%s" % (max_len, e[0], e[1]))
-
-        return lines
-
-    def _get_max_length(self, info):
-        return max([len(e[0]) for e in info])
-
-CommandConstructor = InstallationInformer
diff --git a/biom/commands/metadata_adder.py b/biom/commands/metadata_adder.py
deleted file mode 100644
index cdab17f..0000000
--- a/biom/commands/metadata_adder.py
+++ /dev/null
@@ -1,165 +0,0 @@
-#!/usr/bin/env python
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import division
-from pyqi.core.command import (Command, CommandIn, CommandOut,
-                               ParameterCollection)
-from pyqi.core.exception import CommandError
-from biom.parse import MetadataMap
-from biom.table import Table
-
-__author__ = "Greg Caporaso"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Greg Caporaso", "Morgan Langille", "Jai Ram Rideout",
-               "Daniel McDonald"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Greg Caporaso"
-__email__ = "gregcaporaso at gmail.com"
-
-
-class MetadataAdder(Command):
-    BriefDescription = "Add metadata to a BIOM table"
-    LongDescription = ("Add sample and/or observation metadata to "
-                       "BIOM-formatted files. Detailed usage examples can be "
-                       "found here: http://biom-format.org/documentation/add"
-                       "ing_metadata.html")
-
-    CommandIns = ParameterCollection([
-        CommandIn(Name='table', DataType=Table,
-                  Description='the input BIOM table', Required=True),
-        # sample_metadata and observation_metadata are currently files (or
-        # file-like) because of the existing metadata map / processing function
-        # support. Ideally, these two parameters should be MetadataMap
-        # instances.
-        CommandIn(Name='sample_metadata', DataType=file,
-                  Description='the sample metadata map (will add sample '
-                  'metadata to the input BIOM table, if provided)'),
-        CommandIn(Name='observation_metadata', DataType=file,
-                  Description='the observation metadata map (will add '
-                  'observation metadata to the input BIOM table, if '
-                  'provided)'),
-        CommandIn(Name='sc_separated', DataType=list,
-                  Description='list of the metadata fields to split on '
-                  'semicolons. This is useful for hierarchical data such as '
-                  'taxonomy or functional categories'),
-        CommandIn(Name='sc_pipe_separated', DataType=list,
-                  Description='list of the metadata fields to split on '
-                  'semicolons and pipes ("|"). This is useful for '
-                  'hierarchical data such as functional categories with '
-                  'one-to-many mappings (e.g. x;y;z|x;y;w)'),
-        CommandIn(Name='int_fields', DataType=list,
-                  Description='list of the metadata fields to cast to '
-                  'integers. This is useful for integer data such as '
-                  '"DaysSinceStart"'),
-        CommandIn(Name='float_fields', DataType=list,
-                  Description='list of the metadata fields to cast to '
-                  'floating point numbers. This is useful for real number '
-                  'data such as "pH"'),
-        CommandIn(Name='sample_header', DataType=list,
-                  Description='list of the sample metadata field names. This '
-                  'is useful if a header line is not provided with the '
-                  'metadata, if you want to rename the fields, or if you want '
-                  'to include only the first n fields where n is the number '
-                  'of entries provided here',
-                  DefaultDescription='use header from sample metadata map'),
-        CommandIn(Name='observation_header', DataType=list,
-                  Description='list of the observation metadata field names. '
-                  'This is useful if a header line is not provided with the '
-                  'metadata, if you want to rename the fields, or if you want '
-                  'to include only the first n fields where n is the number '
-                  'of entries provided here',
-                  DefaultDescription='use header from observation metadata '
-                  'map'),
-        CommandIn(Name='output_as_json', DataType=bool,
-                  Description='Output as JSON', Default=False)
-    ])
-
-    CommandOuts = ParameterCollection([
-        CommandOut(Name='table', DataType=tuple,
-                   Description='Table with added metadata, and the output '
-                               'format')
-    ])
-
-    def run(self, **kwargs):
-        table = kwargs['table']
-        sample_metadata = kwargs['sample_metadata']
-        observation_metadata = kwargs['observation_metadata']
-        sc_separated = kwargs['sc_separated']
-        sc_pipe_separated = kwargs['sc_pipe_separated']
-        int_fields = kwargs['int_fields']
-        float_fields = kwargs['float_fields']
-        sample_header = kwargs['sample_header']
-        observation_header = kwargs['observation_header']
-        output_as = 'json' if kwargs['output_as_json'] else 'hdf5'
-
-        # define metadata processing functions, if any
-        process_fns = {}
-        if sc_separated is not None:
-            process_fns.update(dict.fromkeys(sc_separated,
-                                             self._split_on_semicolons))
-
-        if sc_pipe_separated is not None:
-            process_fns.update(dict.fromkeys(sc_pipe_separated,
-                               self._split_on_semicolons_and_pipes))
-
-        if int_fields is not None:
-            process_fns.update(dict.fromkeys(int_fields, self._int))
-
-        if float_fields is not None:
-            process_fns.update(dict.fromkeys(float_fields, self._float))
-
-        # parse mapping files
-        if sample_metadata is not None:
-            sample_metadata = MetadataMap.from_file(sample_metadata,
-                                                    process_fns=process_fns,
-                                                    header=sample_header)
-
-        if observation_metadata is not None:
-            observation_metadata = MetadataMap.from_file(
-                observation_metadata,
-                process_fns=process_fns,
-                header=observation_header)
-
-        if sample_metadata is None and observation_metadata is None:
-            raise CommandError('Must specify sample_metadata and/or '
-                               'observation_metadata.')
-
-        # NAUGHTY: this is modifying the input table IN PLACE!!! And then
-        # RETURNING IT! MetadataAdder is angry!
-
-        # add metadata as necessary
-        if sample_metadata:
-            table.add_metadata(sample_metadata, axis='sample')
-
-        if observation_metadata:
-            table.add_metadata(observation_metadata, axis='observation')
-
-        return {'table': (table, output_as)}
-
-    def _split_on_semicolons(self, x):
-        return [e.strip() for e in x.split(';')]
-
-    def _split_on_semicolons_and_pipes(self, x):
-        return [[e.strip() for e in y.split(';')] for y in x.split('|')]
-
-    def _int(self, x):
-        try:
-            return int(x)
-        except ValueError:
-            return x
-
-    def _float(self, x):
-        try:
-            return float(x)
-        except ValueError:
-            return x
-
-CommandConstructor = MetadataAdder
diff --git a/biom/commands/table_converter.py b/biom/commands/table_converter.py
deleted file mode 100644
index e0829c4..0000000
--- a/biom/commands/table_converter.py
+++ /dev/null
@@ -1,221 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import division
-from pyqi.core.command import (Command, CommandIn, CommandOut,
-                               ParameterCollection)
-from pyqi.core.exception import CommandError
-from biom.table import Table
-from biom.parse import MetadataMap
-
-__author__ = "Greg Caporaso"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Greg Caporaso", "Daniel McDonald",
-               "Jose Carlos Clemente Litran", "Jai Ram Rideout",
-               "Jose Antonio Navas Molina", "Jorge Cañardo Alastuey"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Greg Caporaso"
-__email__ = "gregcaporaso at gmail.com"
-
-
-class TableConverter(Command):
-    TableTypes = ["OTU table",
-                  "Pathway table",
-                  "Function table",
-                  "Ortholog table",
-                  "Gene table",
-                  "Metabolite table",
-                  "Taxon table"]
-
-    ObservationMetadataTypes = {
-        'sc_separated': lambda x: [e.strip() for e in x.split(';')],
-        'naive': lambda x: x
-    }
-
-    ObservationMetadataFormatters = {
-        'sc_separated': lambda x: '; '.join(x),
-        'naive': lambda x: x
-    }
-
-    ObservationMetadataTypes['taxonomy'] = \
-        ObservationMetadataTypes['sc_separated']
-
-    BriefDescription = "Convert to/from the BIOM table format"
-    LongDescription = ("Convert between BIOM and 'classic' (tab-delimited) "
-                       "table formats. Detailed usage examples can be found "
-                       "here: http://biom-format.org/documentation/biom_conver"
-                       "sion.html")
-
-    CommandIns = ParameterCollection([
-        # This is not an ideal usage of the pyqi framework because we are
-        # expecting a file-like object here, and a lot of the parameters deal
-        # with I/O-ish things, like converting between file formats. Even
-        # though no I/O is forced here, it would be better to have rich objects
-        # as input and output, instead of lines of data. However, this will
-        # likely require a refactoring/redesign of our interface for table
-        # conversions because the primary input here can be either a BIOM table
-        # or a classic table. One possible solution is to split out different
-        # types of conversions into their own (smaller and simpler) commands,
-        # which would allow us to avoid some of this I/O-ish stuff.
-        CommandIn(Name='table', DataType=Table,
-                  Description='the input table (file-like object), either in '
-                  'BIOM or classic format', Required=True),
-        CommandIn(Name='to_json', DataType=bool,
-                  Description='Output as a JSON table', Default=False),
-        CommandIn(Name='to_hdf5', DataType=bool,
-                  Description='Output as a HDF5 table', Default=False),
-        CommandIn(Name='to_tsv', DataType=bool,
-                  Description='Output as a TSV table', Default=False),
-        CommandIn(Name='collapsed_samples', DataType=bool,
-                  Description='If to_hdf5 and the original table is a '
-                              'collapsed by samples biom table, this will '
-                              'update the sample metadata of the table to '
-                              'the supported HDF5 collapsed format'),
-        CommandIn(Name='collapsed_observations', DataType=bool,
-                  Description='If to_hdf5 and the original table is a '
-                              'collapsed by observations biom table, this will'
-                              ' update the observation metadata of the table '
-                              'to the supported HDF5 collapsed format'),
-        CommandIn(Name='sample_metadata', DataType=MetadataMap,
-                  Description='the sample metadata map (will add sample '
-                  'metadata to the BIOM table, if provided). Only applies '
-                  'when converting from classic table file to BIOM table '
-                  'file'),
-        CommandIn(Name='observation_metadata', DataType=MetadataMap,
-                  Description='the observation metadata map (will add '
-                  'observation metadata to the BIOM table, if provided). Only '
-                  'applies when converting from classic table file to BIOM '
-                  'table file'),
-        CommandIn(Name='header_key', DataType=str,
-                  Description='pull this key from observation metadata within '
-                  'a BIOM table file when creating a classic table file',
-                  DefaultDescription='no observation metadata will be '
-                  'included'),
-        CommandIn(Name='output_metadata_id', DataType=str,
-                  Description='the name to be given to the observation '
-                  'metadata column when creating a classic table from a BIOM-'
-                  'formatted table', DefaultDescription='same name as in the '
-                  'BIOM-formatted table'),
-        CommandIn(Name='table_type', DataType=str,
-                  Description='the type of the table, must be one of: %s' %
-                  ', '.join(TableTypes), Required=False),
-        CommandIn(Name='process_obs_metadata', DataType=str,
-                  Description='process metadata associated with observations '
-                  'when converting from a classic table. Must be one of: %s' %
-                  ', '.join(ObservationMetadataTypes), Default=None),
-        CommandIn(Name='tsv_metadata_formatter', DataType=str,
-                  Description='Method for formatting the observation '
-                  'metadata, must be one of: %s' %
-                  ', '.join(ObservationMetadataFormatters),
-                  Default='sc_separated')
-    ])
-
-    CommandOuts = ParameterCollection([
-        CommandOut(Name='table', DataType=tuple,
-                   Description='The resulting table and format')
-    ])
-
-    def run(self, **kwargs):
-        table = kwargs['table']
-        sample_metadata = kwargs['sample_metadata']
-        observation_metadata = kwargs['observation_metadata']
-        header_key = kwargs['header_key']
-        output_metadata_id = kwargs['output_metadata_id']
-        process_obs_metadata = kwargs['process_obs_metadata']
-        obs_md_fmt = kwargs['tsv_metadata_formatter']
-        table_type = kwargs['table_type']
-        to_tsv = kwargs['to_tsv']
-        to_hdf5 = kwargs['to_hdf5']
-        to_json = kwargs['to_json']
-        collapsed_observations = kwargs['collapsed_observations']
-        collapsed_samples = kwargs['collapsed_samples']
-
-        if sum([to_tsv, to_hdf5, to_json]) == 0:
-            raise CommandError("Must specify an output format")
-        elif sum([to_tsv, to_hdf5, to_json]) > 1:
-            raise CommandError("Can only specify a single output format")
-
-        # if we don't have a table type, then one is required to be specified
-        if table.type in [None, "None"]:
-            if table_type is None:
-                raise CommandError("Must specify --table-type!")
-            else:
-                if table_type not in self.TableTypes:
-                    raise CommandError("Unknown table type: %s" % table_type)
-
-                table.type = table_type
-
-        if obs_md_fmt not in self.ObservationMetadataFormatters:
-            raise CommandError("Unknown tsv_metadata_formatter: %s" %
-                               obs_md_fmt)
-        else:
-            obs_md_fmt_f = self.ObservationMetadataFormatters[obs_md_fmt]
-
-        if sample_metadata is not None:
-            table.add_metadata(sample_metadata)
-
-        # if the user does not specify a name for the output metadata column,
-        # set it to the same as the header key
-        output_metadata_id = output_metadata_id or header_key
-
-        if process_obs_metadata is not None and not to_tsv:
-            if process_obs_metadata not in self.ObservationMetadataTypes:
-                raise CommandError(
-                    "Unknown observation metadata processing method, must be "
-                    "one of: %s" %
-                    ', '.join(self.ObservationMetadataTypes.keys()))
-
-            if table.metadata(axis='observation') is None:
-                raise CommandError("Observation metadata processing requested "
-                                   "but it doesn't appear that there is any "
-                                   "metadata to operate on!")
-
-            # and if this came in as TSV, then we expect only a single type of
-            # metadata
-            md_key = table.metadata(axis='observation')[0].keys()[0]
-
-            process_f = self.ObservationMetadataTypes[process_obs_metadata]
-            it = zip(table.ids(axis='observation'),
-                     table.metadata(axis='observation'))
-            new_md = {id_: {md_key: process_f(md[md_key])} for id_, md in it}
-
-            if observation_metadata:
-                for k, v in observation_metadata.items():
-                    new_md[k].update(v)
-            table.add_metadata(new_md, 'observation')
-
-        if to_tsv:
-            result = table.to_tsv(header_key=header_key,
-                                  header_value=output_metadata_id,
-                                  metadata_formatter=obs_md_fmt_f)
-            fmt = 'tsv'
-        elif to_json:
-            result = table
-            fmt = 'json'
-        elif to_hdf5:
-            result = table
-            if collapsed_observations:
-                metadata = [{'collapsed_ids': md.keys()}
-                            for md in result.metadata(axis='observation')]
-                result._observation_metadata = metadata
-            if collapsed_samples:
-                metadata = [{'collapsed_ids': md.keys()}
-                            for md in result.metadata()]
-                result._sample_metadata = metadata
-            if collapsed_observations or collapsed_samples:
-                # We have changed the metadata, it is safer to make sure that
-                # it is correct
-                result._cast_metadata()
-            fmt = 'hdf5'
-
-        return {'table': (result, fmt)}
-
-CommandConstructor = TableConverter
diff --git a/biom/commands/table_normalizer.py b/biom/commands/table_normalizer.py
deleted file mode 100755
index 95b370a..0000000
--- a/biom/commands/table_normalizer.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import division
-from pyqi.core.command import (Command, CommandIn, CommandOut,
-                               ParameterCollection)
-from pyqi.core.exception import CommandError
-from biom.table import Table
-from biom.util import HAVE_H5PY
-from biom import load_table
-
-__author__ = "Michael Shaffer"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Michael Shaffer"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__author__ = "Michael Shaffer"
-__email__ = "michael.shaffer at ucdenver.edu"
-
-
-class TableNormalizer(Command):
-    Axes = ['sample', 'observation']
-
-    BriefDescription = "Normalize a BIOM table"
-    LongDescription = ("Normalize the values of a BIOM table through various "
-                       "methods. Relative abundance will take the relative "
-                       "abundance of each observation in terms of samples or "
-                       "observations.  Presence absensece will convert "
-                       "observations to 1's and 0's based on presence of the "
-                       "observation"
-                       )
-
-    CommandIns = ParameterCollection([
-        CommandIn(Name='biom_table', DataType=str,
-                  Description='the input BIOM table'),
-        CommandIn(Name='axis', DataType=str,
-                  Description='the axis to subset over, either ' +
-                  ' or '.join(Axes),
-                  Required=False),
-        CommandIn(Name='relative_abund', DataType=bool,
-                  Description='normalize the table by relative abundance',
-                  Required=False),
-        CommandIn(Name='presence_absence', DataType=bool,
-                  Description='convert table to presence/absence values',
-                  Required=False)
-    ])
-
-    CommandOuts = ParameterCollection([
-        CommandOut(Name='table', DataType=tuple,
-                   Description='The resulting table and format')
-    ])
-
-    def run(self, **kwargs):
-        biom_table = kwargs['biom_table']
-        axis = kwargs['axis']
-        relative_abund = kwargs['relative_abund']
-        p_a = kwargs['presence_absence']
-
-        if axis not in self.Axes:
-            raise CommandError("Invalid axis '%s'. Must be either %s." % (
-                axis,
-                ' or '.join(map(lambda e: "'%s'" % e, self.Axes))))
-
-        if biom_table is None:
-            raise CommandError("Must specify an input table")
-
-        if relative_abund is False and p_a is False:
-            raise CommandError("Must specifiy a normalization type")
-        elif relative_abund is True and p_a is True:
-            raise CommandError("Must specify only one normalization type")
-
-        table = load_table(biom_table)
-
-        if relative_abund is True:
-            table.norm(axis=axis)
-        else:
-            table.pa()
-
-        if HAVE_H5PY:
-            return {'table': (table, 'hdf5')}
-        else:
-            return {'table': (table, 'json')}
-
-CommandConstructor = TableNormalizer
diff --git a/biom/commands/table_subsetter.py b/biom/commands/table_subsetter.py
deleted file mode 100644
index 4ae26e9..0000000
--- a/biom/commands/table_subsetter.py
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/usr/bin/env python
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from __future__ import division
-from pyqi.core.command import (Command, CommandIn, CommandOut,
-                               ParameterCollection)
-from pyqi.core.exception import CommandError
-from biom.parse import get_axis_indices, direct_slice_data, direct_parse_key
-from biom.table import Table
-from biom.util import biom_open
-
-__author__ = "Daniel McDonald"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Daniel McDonald", "Jai Ram Rideout",
-               "Jose Antonio Navas Molina"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__author__ = "Daniel McDonald"
-__email__ = "daniel.mcdonald at colorado.edu"
-
-
-class TableSubsetter(Command):
-    Axes = ['sample', 'observation']
-
-    BriefDescription = "Subset a BIOM table"
-    LongDescription = ("Subset a BIOM table, over either observations or "
-                       "samples, without fully parsing it. This command is "
-                       "intended to assist in working with very large tables "
-                       "when tight on memory, or as a lightweight way to "
-                       "subset a full table. Currently, it is possible to "
-                       "produce tables with rows or columns (observations or "
-                       "samples) that are fully zeroed.")
-
-    CommandIns = ParameterCollection([
-        CommandIn(Name='json_table_str', DataType=str,
-                  Description='the input BIOM table as an unparsed json '
-                              'string',
-                  Required=False),
-        CommandIn(Name='hdf5_table', DataType=str,
-                  Description='the fp to the input BIOM table',
-                  Required=False),
-        CommandIn(Name='axis', DataType=str,
-                  Description='the axis to subset over, either ' +
-                  ' or '.join(Axes), Required=True),
-        CommandIn(Name='ids', DataType=list,
-                  Description='the IDs to retain (either sample IDs or '
-                  'observation IDs, depending on the axis)', Required=True)
-    ])
-
-    CommandOuts = ParameterCollection([
-        CommandOut(Name='subsetted_table', DataType=tuple,
-                   Description='The subset generator')
-    ])
-
-    def run(self, **kwargs):
-        json_table_str = kwargs['json_table_str']
-        hdf5_biom = kwargs['hdf5_table']
-        axis = kwargs['axis']
-        ids = kwargs['ids']
-
-        if axis not in self.Axes:
-            raise CommandError("Invalid axis '%s'. Must be either %s." % (
-                axis,
-                ' or '.join(map(lambda e: "'%s'" % e, self.Axes))))
-
-        if hdf5_biom is None and json_table_str is None:
-            raise CommandError("Must specify an input table")
-        elif hdf5_biom is not None and json_table_str is not None:
-            raise CommandError("Can only specify one input table")
-
-        if json_table_str is not None:
-            idxs, new_axis_md = get_axis_indices(json_table_str, ids, axis)
-            new_data = direct_slice_data(json_table_str, idxs, axis)
-
-            # multiple walks over the string. bad form, but easy right now
-            # ...should add a yield_and_ignore parser or something.
-            def subset_generator():
-                yield "{"
-                yield direct_parse_key(json_table_str, "id")
-                yield ","
-                yield direct_parse_key(json_table_str, "format")
-                yield ","
-                yield direct_parse_key(json_table_str, "format_url")
-                yield ","
-                yield direct_parse_key(json_table_str, "type")
-                yield ","
-                yield direct_parse_key(json_table_str, "generated_by")
-                yield ","
-                yield direct_parse_key(json_table_str, "date")
-                yield ","
-                yield direct_parse_key(json_table_str, "matrix_type")
-                yield ","
-                yield direct_parse_key(json_table_str, "matrix_element_type")
-                yield ","
-                yield new_data
-                yield ","
-                yield new_axis_md
-                yield ","
-
-                if axis == "observation":
-                    yield direct_parse_key(json_table_str, "columns")
-                else:
-                    yield direct_parse_key(json_table_str, "rows")
-                yield "}"
-
-            format_ = 'json'
-            table = subset_generator()
-        else:
-            with biom_open(hdf5_biom) as f:
-                table = Table.from_hdf5(f, ids=ids, axis=axis)
-            format_ = 'hdf5'
-
-        return {'subsetted_table': (table, format_)}
-
-CommandConstructor = TableSubsetter
diff --git a/biom/commands/table_summarizer.py b/biom/commands/table_summarizer.py
deleted file mode 100644
index 0d6442e..0000000
--- a/biom/commands/table_summarizer.py
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/usr/bin/env python
-
-# -----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-
-from __future__ import division
-from signal import signal, SIGPIPE, SIG_DFL
-from operator import itemgetter
-
-from pyqi.core.command import (Command, CommandIn, CommandOut,
-                               ParameterCollection)
-
-from numpy import std
-from biom.util import compute_counts_per_sample_stats
-
-# Ignore SIG_PIPE and don't throw exceptions on it. This allows the user to
-# pipe the output from summarize table to other commands.
-# http://newbebweb.blogspot.com/2012/02/python-head-ioerror-errno-32-broken.html
-# http://docs.python.org/library/signal.html
-signal(SIGPIPE, SIG_DFL)
-
-
-__author__ = "Greg Caporaso"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Greg Caporaso", "Daniel McDonald", "Jose Antonio Navas Molina"]
-__license__ = "BSD"
-__maintainer__ = "Greg Caporaso"
-__email__ = "gregcaporaso at gmail.com"
-
-
-class TableSummarizer(Command):
-
-    """
-     Example usage:
-      from biom.commands.table_summarizer import TableSummarizer
-      from biom.parse import parse_biom_table
-      c = TableSummarizer()
-      table_f = open("table.biom")
-      t = parse_biom_table(table_f)
-      table_f.seek(0)
-      result = c(table=(t,None))
-      result = c(table=(t,None),qualitative=True)
-      result = c(table=(t,table_f),qualitative=True)
-      table_f.close()
-    """
-    BriefDescription = "Summarize sample or observation data in a BIOM table"
-    LongDescription = ("Provides details on the observation counts per sample,"
-                       " including summary statistics, as well as metadata "
-                       "categories associated with samples and observations.")
-
-    CommandIns = ParameterCollection([
-        CommandIn(Name='table',
-                  DataType=tuple,
-                  Description='the input BIOM table',
-                  Required=True),
-        CommandIn(Name='qualitative',
-                  DataType=bool,
-                  Description=('Present counts as number of unique '
-                               'observation ids per sample, rather than '
-                               'counts of observations per sample.'),
-                  Required=False,
-                  Default=False),
-        CommandIn(Name='observations',
-                  DataType=bool,
-                  Default=False,
-                  Description=('Summarize over observations'))
-    ])
-
-    CommandOuts = ParameterCollection([
-        CommandOut(Name='biom_summary',
-                   DataType=list,
-                   Description='The table summary')
-    ])
-
-    def run(self, **kwargs):
-        result = {}
-        qualitative = kwargs['qualitative']
-        by_observations = kwargs['observations']
-        table, table_lines = kwargs['table']
-
-        if by_observations:
-            table = table.transpose()
-
-        min_counts, max_counts, median_counts, mean_counts, counts_per_samp =\
-            compute_counts_per_sample_stats(table, qualitative)
-        num_observations = len(table.ids(axis='observation'))
-
-        counts_per_sample_values = counts_per_samp.values()
-
-        if table.metadata() is None:
-            sample_md_keys = ["None provided"]
-        else:
-            sample_md_keys = table.metadata()[0].keys()
-
-        if table.metadata(axis='observation') is None:
-            observation_md_keys = ["None provided"]
-        else:
-            observation_md_keys = table.metadata(axis='observation')[0].keys()
-
-        lines = []
-
-        num_samples = len(table.ids())
-
-        if by_observations:
-            # as this is a transpose of the original table...
-            lines.append('Num samples: %d' % num_observations)
-            lines.append('Num observations: %d' % num_samples)
-        else:
-            lines.append('Num samples: %d' % num_samples)
-            lines.append('Num observations: %d' % num_observations)
-
-        if not qualitative:
-            total_count = sum(counts_per_sample_values)
-            lines.append('Total count: %d' % total_count)
-            lines.append('Table density (fraction of non-zero values): %1.3f' %
-                         table.get_table_density())
-
-        lines.append('')
-
-        if qualitative:
-            if by_observations:
-                lines.append('Sample/observations summary:')
-            else:
-                lines.append('Observations/sample summary:')
-        else:
-            lines.append('Counts/sample summary:')
-
-        lines.append(' Min: %r' % min_counts)
-        lines.append(' Max: %r' % max_counts)
-        lines.append(' Median: %1.3f' % median_counts)
-        lines.append(' Mean: %1.3f' % mean_counts)
-        lines.append(' Std. dev.: %1.3f' % std(counts_per_sample_values))
-
-        if by_observations:
-            # since this is a transpose...
-            lines.append(
-                ' Sample Metadata Categories: %s' %
-                '; '.join(observation_md_keys))
-            lines.append(
-                ' Observation Metadata Categories: %s' %
-                '; '.join(sample_md_keys))
-            lines.append('')
-        else:
-            lines.append(
-                ' Sample Metadata Categories: %s' %
-                '; '.join(sample_md_keys))
-            lines.append(
-                ' Observation Metadata Categories: %s' %
-                '; '.join(observation_md_keys))
-            lines.append('')
-
-        if qualitative:
-            lines.append('Observations/sample detail:')
-        else:
-            lines.append('Counts/sample detail:')
-
-        for k, v in sorted(counts_per_samp.items(), key=itemgetter(1)):
-            lines.append(' %s: %r' % (k, v))
-
-        result['biom_summary'] = lines
-        return result
-
-CommandConstructor = TableSummarizer
diff --git a/biom/interfaces/__init__.py b/biom/interfaces/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/biom/interfaces/html/__init__.py b/biom/interfaces/html/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/biom/interfaces/html/config/__init__.py b/biom/interfaces/html/config/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/biom/interfaces/html/config/add_metadata.py b/biom/interfaces/html/config/add_metadata.py
deleted file mode 100644
index d400236..0000000
--- a/biom/interfaces/html/config/add_metadata.py
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/usr/bin/env python
-
-# -----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-
-from pyqi.core.interfaces.html import (HTMLInputOption, HTMLDownload)
-from pyqi.core.command import (make_command_in_collection_lookup_f,
-                               make_command_out_collection_lookup_f)
-from pyqi.core.interfaces.html.output_handler import newline_list_of_strings
-from pyqi.core.interfaces.optparse.input_handler import string_list_handler
-
-from biom.interfaces.html.input_handler import load_biom_table
-from biom.commands.metadata_adder import CommandConstructor
-
-__author__ = "Evan Bolyen"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = [
-    "Evan Bolyen", "Jai Ram Rideout", "Greg Caporaso", "Morgan Langille",
-    "Daniel McDonald"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Evan Bolyen"
-__email__ = "ebolyen at gmail.com"
-
-cmd_in_lookup = make_command_in_collection_lookup_f(CommandConstructor)
-cmd_out_lookup = make_command_out_collection_lookup_f(CommandConstructor)
-
-
-inputs = [
-    HTMLInputOption(Parameter=cmd_in_lookup('table'),
-                    Type='upload_file',
-                    Handler=load_biom_table,
-                    Name='input-fp'),
-
-    HTMLInputOption(Parameter=cmd_in_lookup('sample_metadata'),
-                    Type='upload_file',
-                    Name='sample-metadata-fp'),
-
-    HTMLInputOption(Parameter=cmd_in_lookup('observation_metadata'),
-                    Type='upload_file',
-                    Name='observation-metadata-fp'),
-
-    HTMLInputOption(Parameter=cmd_in_lookup('sc_separated'),
-                    Handler=string_list_handler,
-                    Help='comma-separated list of the metadata fields to '
-                    'split on semicolons. This is useful for hierarchical '
-                    'data such as taxonomy or functional categories'),
-
-    HTMLInputOption(Parameter=cmd_in_lookup('sc_pipe_separated'),
-                    Handler=string_list_handler,
-                    Help='comma-separated list of the metadata fields to split'
-                    ' on semicolons and pipes ("|"). This is useful for '
-                    'hierarchical data such as functional categories with '
-                    'one-to-many mappings (e.g. x;y;z|x;y;w)'),
-
-    HTMLInputOption(Parameter=cmd_in_lookup('int_fields'),
-                    Handler=string_list_handler,
-                    Help='comma-separated list of the metadata fields to cast '
-                    'to integers. This is useful for integer data such as '
-                    '"DaysSinceStart"'),
-
-    HTMLInputOption(Parameter=cmd_in_lookup('float_fields'),
-                    Handler=string_list_handler,
-                    Help='comma-separated list of the metadata fields to cast '
-                    'to floating point numbers. This is useful for real number'
-                    ' data such as "pH"'),
-
-    HTMLInputOption(Parameter=cmd_in_lookup('sample_header'),
-                    Handler=string_list_handler,
-                    Help='comma-separated list of the sample metadata field '
-                    'names. This is useful if a header line is not provided '
-                    'with the metadata, if you want to rename the fields, or '
-                    'if you want to include only the first n fields where n is'
-                    ' the number of entries provided here'),
-
-    HTMLInputOption(Parameter=cmd_in_lookup('observation_header'),
-                    Handler=string_list_handler,
-                    Help='comma-separated list of the observation metadata '
-                    'field names. This is useful if a header line is not '
-                    'provided with the metadata, if you want to rename the '
-                    'fields, or if you want to include only the first n fields'
-                    ' where n is the number of entries provided here'),
-    HTMLInputOption(Parameter=None,
-                    Name='download-file',
-                    Required=True,
-                    Help='the download file')
-]
-
-outputs = [
-    HTMLDownload(Parameter=cmd_out_lookup('table'),
-                 Handler=newline_list_of_strings,
-                 FilenameLookup='download-file',
-                 FileExtension='.biom')
-]
diff --git a/biom/interfaces/html/config/convert.py b/biom/interfaces/html/config/convert.py
deleted file mode 100644
index bfdfa11..0000000
--- a/biom/interfaces/html/config/convert.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python
-
-# -----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-
-from pyqi.core.interfaces.html import (HTMLInputOption, HTMLDownload)
-from pyqi.core.command import (make_command_in_collection_lookup_f,
-                               make_command_out_collection_lookup_f)
-from pyqi.core.interfaces.html.output_handler import newline_list_of_strings
-
-from biom.interfaces.html.input_handler import load_metadata
-from biom.commands.table_converter import CommandConstructor
-
-__author__ = "Evan Bolyen"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = [
-    "Evan Bolyen",
-    "Jai Ram Rideout",
-    "Greg Caporaso",
-    "Daniel McDonald"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Evan Bolyen"
-__email__ = "ebolyen at gmail.com"
-
-cmd_in_lookup = make_command_in_collection_lookup_f(CommandConstructor)
-cmd_out_lookup = make_command_out_collection_lookup_f(CommandConstructor)
-
-
-inputs = [
-    HTMLInputOption(Parameter=cmd_in_lookup('table'),
-                    Type='upload_file',
-                    Help='the input table filepath, either in BIOM or classic '
-                    'format'),
-    HTMLInputOption(Parameter=cmd_in_lookup('to_tsv'),
-                    Type=bool),
-    HTMLInputOption(Parameter=cmd_in_lookup('to_json'),
-                    Type=bool),
-    HTMLInputOption(Parameter=cmd_in_lookup('to_hdf5'),
-                    Type=bool),
-    HTMLInputOption(Parameter=cmd_in_lookup('sample_metadata'),
-                    Type='upload_file',
-                    Handler=load_metadata),
-    HTMLInputOption(Parameter=cmd_in_lookup('observation_metadata'),
-                    Type='upload_file',
-                    Handler=load_metadata),
-    HTMLInputOption(Parameter=cmd_in_lookup('header_key')),
-    HTMLInputOption(Parameter=cmd_in_lookup('output_metadata_id')),
-    HTMLInputOption(Parameter=cmd_in_lookup('process_obs_metadata'),
-                    Type='multiple_choice',
-                    Choices=['taxonomy', 'naive', 'sc_separated'],
-                    Help='Process metadata associated with observations when '
-                    'converting from a classic table'),
-    HTMLInputOption(Parameter=cmd_in_lookup('tsv_metadata_formatter'),
-                    Type='multiple_choice',
-                    Choices=['naive', 'sc_separated'],
-                    Help='Format the metadata for TSV output'),
-    HTMLInputOption(Parameter=None,
-                    Name='download-file',
-                    Required=True,
-                    Help='the download file')
-]
-
-outputs = [
-    HTMLDownload(Parameter=cmd_out_lookup('table'),
-                 Handler=newline_list_of_strings,
-                 FilenameLookup='download-file',
-                 FileExtension='.biom')
-]
diff --git a/biom/interfaces/html/config/normalize_table.py b/biom/interfaces/html/config/normalize_table.py
deleted file mode 100755
index 5151717..0000000
--- a/biom/interfaces/html/config/normalize_table.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-
-# -----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-
-from pyqi.core.interfaces.html import (HTMLInputOption, HTMLDownload)
-from pyqi.core.command import (make_command_in_collection_lookup_f,
-                               make_command_out_collection_lookup_f)
-from pyqi.core.interfaces.html.output_handler import newline_list_of_strings
-
-from biom.commands.table_converter import CommandConstructor
-
-__author__ = "Michael Shaffer"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Michael Shaffer"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Michael Shaffer"
-__email__ = "michael.shaffer at ucdenver.edu"
-
-cmd_in_lookup = make_command_in_collection_lookup_f(CommandConstructor)
-cmd_out_lookup = make_command_out_collection_lookup_f(CommandConstructor)
-
-
-inputs = [
-    HTMLInputOption(Parameter=cmd_in_lookup('biom_table'),
-                    Type='upload_file',
-                    Required=True,
-                    Help='the input table filepath'),
-
-    HTMLInputOption(Parameter=cmd_in_lookup('relative_abund'),
-                    Type=bool),
-    HTMLInputOption(Parameter=cmd_in_lookup('presence_absence'),
-                    Type=bool),
-    HTMLInputOption(Parameter=cmd_in_lookup('axis'),
-                    Type='multiple_choice',
-                    Choices=['observation', 'sample'],
-                    Help='axis by which to normalize the table'),
-
-    HTMLInputOption(Parameter=None,
-                    Name='download-file',
-                    Required=True,
-                    Help='the download file')
-]
-
-outputs = [
-    HTMLDownload(Parameter=cmd_out_lookup('table'),
-                 Handler=newline_list_of_strings,
-                 FilenameLookup='download-file',
-                 FileExtension='.biom')
-]
diff --git a/biom/interfaces/html/config/show_install_info.py b/biom/interfaces/html/config/show_install_info.py
deleted file mode 100644
index da8772e..0000000
--- a/biom/interfaces/html/config/show_install_info.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-
-# -----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-
-from pyqi.core.interfaces.html import HTMLPage
-from pyqi.core.command import make_command_out_collection_lookup_f
-from pyqi.core.interfaces.html.output_handler import html_list_of_strings
-from biom.commands.installation_informer import CommandConstructor
-
-__author__ = "Evan Bolyen"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = [
-    "Evan Bolyen",
-    "Jai Ram Rideout",
-    "Greg Caporaso",
-    "Daniel McDonald"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Evan Bolyen"
-__email__ = "ebolyen at gmail.com"
-
-cmd_out_lookup = make_command_out_collection_lookup_f(CommandConstructor)
-
-
-inputs = []
-
-outputs = [
-    HTMLPage(Parameter=cmd_out_lookup('install_info_lines'),
-             Handler=html_list_of_strings)
-]
diff --git a/biom/interfaces/html/config/summarize_table.py b/biom/interfaces/html/config/summarize_table.py
deleted file mode 100644
index ec976e7..0000000
--- a/biom/interfaces/html/config/summarize_table.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-
-# -----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-
-from pyqi.core.command import (make_command_in_collection_lookup_f,
-                               make_command_out_collection_lookup_f)
-from pyqi.core.interfaces.html.output_handler import newline_list_of_strings
-from pyqi.core.interfaces.html import (HTMLInputOption, HTMLDownload)
-from biom.commands.table_summarizer import CommandConstructor
-from biom.interfaces.html.input_handler import (
-    load_biom_table_with_file_contents
-    )
-
-__author__ = "Evan Bolyen"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = [
-    "Evan Bolyen",
-    "Greg Caporaso",
-    "Jai Ram Rideout",
-    "Daniel McDonald"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Evan Bolyen"
-__email__ = "ebolyen at gmail.com"
-
-cmd_in_lookup = make_command_in_collection_lookup_f(CommandConstructor)
-cmd_out_lookup = make_command_out_collection_lookup_f(CommandConstructor)
-
-inputs = [
-    HTMLInputOption(Parameter=cmd_in_lookup('table'),
-                    Type="upload_file",
-                    Handler=load_biom_table_with_file_contents,
-                    Name='input-fp'),
-    HTMLInputOption(Parameter=cmd_in_lookup('qualitative'),
-                    Type=bool),
-    HTMLInputOption(Parameter=None,
-                    Name='download-file',
-                    Required=True,
-                    Help='the download file')
-]
-
-outputs = [
-    HTMLDownload(Parameter=cmd_out_lookup('biom_summary'),
-                 Handler=newline_list_of_strings,
-                 FilenameLookup='download-file',
-                 FileExtension='.biom.summary.txt')
-]
diff --git a/biom/interfaces/html/config/validate_table.py b/biom/interfaces/html/config/validate_table.py
deleted file mode 100644
index 50ddf40..0000000
--- a/biom/interfaces/html/config/validate_table.py
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# -----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-
-from pyqi.core.interfaces.html import (HTMLInputOption, HTMLPage)
-from pyqi.core.command import (make_command_in_collection_lookup_f,
-                               make_command_out_collection_lookup_f)
-from biom.commands.table_validator import CommandConstructor
-from biom.interfaces.html.input_handler import load_json_document
-
-__author__ = "Evan Bolyen"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Evan Bolyen", "Jai Ram Rideout", "Daniel McDonald",
-               "Jorge Cañardo Alastuey"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Evan Bolyen"
-__email__ = "ebolyen at gmail.com"
-
-cmd_in_lookup = make_command_in_collection_lookup_f(CommandConstructor)
-cmd_out_lookup = make_command_out_collection_lookup_f(CommandConstructor)
-
-
-def display_table_validity(result_key, data, option_value=None):
-    if data is None:
-        return "The input file is a valid BIOM-formatted file."
-    else:
-        to_join = ["The input file is not a valid BIOM-formatted file."]
-        to_join += data
-        return "<br/>".join(to_join)
-
-
-inputs = [
-    HTMLInputOption(Parameter=cmd_in_lookup('table'),
-                    Type='upload_file',
-                    Handler=load_json_document,
-                    Name='input-fp',
-                    Help='the input file to validate against the BIOM '
-                    'format specification'),
-
-    HTMLInputOption(Parameter=cmd_in_lookup('format_version')),
-
-    HTMLInputOption(Parameter=cmd_in_lookup('detailed_report'), Type=bool),
-]
-
-outputs = [
-    HTMLPage(Parameter=cmd_out_lookup('report_lines'),
-             Handler=display_table_validity)
-]
diff --git a/biom/interfaces/html/input_handler.py b/biom/interfaces/html/input_handler.py
deleted file mode 100644
index a230dd2..0000000
--- a/biom/interfaces/html/input_handler.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-import json
-from biom.parse import MetadataMap, parse_biom_table
-
-__author__ = "Evan Bolyen"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Evan Bolyen", "Greg Caporaso", "Jai Ram Rideout"]
-__license__ = "BSD"
-__maintainer__ = "Evan Bolyen"
-__email__ = "ebolyen at gmail.com"
-
-
-def load_biom_table(table_f):
-    """Return a parsed BIOM table."""
-    return parse_biom_table(table_f)
-
-
-def load_biom_table_with_file_contents(biom_f):
-    """Return a BIOM table and the original open filehandle as a tuple.
-
-    Useful when additional computation needs to be performed on the file
-    contents, such as an MD5 sum.
-
-    WARNING: this function does not close the open filehandle that it returns.
-    Users of this function are responsible for closing the filehandle when done
-    using it!
-    """
-    table = parse_biom_table(biom_f)
-    if hasattr(biom_f, 'seek'):
-        biom_f.seek(0)
-    return table, biom_f
-
-
-def load_json_document(f):
-    """Return a parsed JSON object."""
-    return json.load(f)
-
-
-def load_metadata(lines):
-    """Parse a sample/observation metadata file, return a ``MetadataMap``.
-
-    If ``lines`` is ``None``, this function will return ``None``.
-    """
-    if lines is not None:
-        return MetadataMap.from_file(lines)
-
-    return None
diff --git a/biom/interfaces/optparse/__init__.py b/biom/interfaces/optparse/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/biom/interfaces/optparse/config/__init__.py b/biom/interfaces/optparse/config/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/biom/interfaces/optparse/config/add_metadata.py b/biom/interfaces/optparse/config/add_metadata.py
deleted file mode 100644
index 47cb2d7..0000000
--- a/biom/interfaces/optparse/config/add_metadata.py
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/env python
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from pyqi.core.interfaces.optparse import (OptparseOption,
-                                           OptparseUsageExample,
-                                           OptparseResult)
-from pyqi.core.command import (make_command_in_collection_lookup_f,
-                               make_command_out_collection_lookup_f)
-from pyqi.core.interfaces.optparse.input_handler import (file_reading_handler,
-                                                         string_list_handler)
-from biom.commands.metadata_adder import CommandConstructor
-from biom.interfaces.optparse.input_handler import load_biom_table
-from biom.interfaces.optparse.output_handler import write_biom_table
-from biom.util import HAVE_H5PY
-
-__author__ = "Jai Ram Rideout"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Jai Ram Rideout", "Greg Caporaso", "Morgan Langille",
-               "Daniel McDonald"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Jai Ram Rideout"
-__email__ = "jai.rideout at gmail.com"
-
-cmd_in_lookup = make_command_in_collection_lookup_f(CommandConstructor)
-cmd_out_lookup = make_command_out_collection_lookup_f(CommandConstructor)
-
-usage_examples = [
-    OptparseUsageExample(ShortDesc="Adding sample metadata",
-                         LongDesc="Add sample metadata to a BIOM table",
-                         Ex="%prog -i otu_table.biom -o "
-                            "table_with_sample_metadata.biom -m "
-                            "sample_metadata.txt")
-]
-
-inputs = [
-    OptparseOption(Parameter=cmd_in_lookup('table'),
-                   Type='existing_filepath',
-                   Handler=load_biom_table, ShortName='i',
-                   Name='input-fp'),
-
-    OptparseOption(Parameter=cmd_in_lookup('sample_metadata'),
-                   Type='existing_filepath',
-                   Handler=file_reading_handler, ShortName='m',
-                   Name='sample-metadata-fp'),
-
-    OptparseOption(Parameter=cmd_in_lookup('observation_metadata'),
-                   Type='existing_filepath',
-                   Handler=file_reading_handler,
-                   Name='observation-metadata-fp'),
-
-    OptparseOption(Parameter=cmd_in_lookup('sc_separated'),
-                   Handler=string_list_handler,
-                   Help='comma-separated list of the metadata fields to split '
-                   'on semicolons. This is useful for hierarchical data such '
-                   'as taxonomy or functional categories'),
-
-    OptparseOption(Parameter=cmd_in_lookup('sc_pipe_separated'),
-                   Handler=string_list_handler,
-                   Help='comma-separated list of the metadata fields to split '
-                   'on semicolons and pipes ("|"). This is useful for '
-                   'hierarchical data such as functional categories with '
-                   'one-to-many mappings (e.g. x;y;z|x;y;w)'),
-
-    OptparseOption(Parameter=cmd_in_lookup('int_fields'),
-                   Handler=string_list_handler,
-                   Help='comma-separated list of the metadata fields to cast '
-                   'to integers. This is useful for integer data such as '
-                   '"DaysSinceStart"'),
-
-    OptparseOption(Parameter=cmd_in_lookup('float_fields'),
-                   Handler=string_list_handler,
-                   Help='comma-separated list of the metadata fields to cast '
-                   'to floating point numbers. This is useful for real number '
-                   'data such as "pH"'),
-
-    OptparseOption(Parameter=cmd_in_lookup('sample_header'),
-                   Handler=string_list_handler,
-                   Help='comma-separated list of the sample metadata field '
-                   'names. This is useful if a header line is not provided '
-                   'with the metadata, if you want to rename the fields, or '
-                   'if you want to include only the first n fields where n is '
-                   'the number of entries provided here'),
-
-    OptparseOption(Parameter=cmd_in_lookup('observation_header'),
-                   Handler=string_list_handler,
-                   Help='comma-separated list of the observation metadata '
-                   'field names. This is useful if a header line is not '
-                   'provided with the metadata, if you want to rename the '
-                   'fields, or if you want to include only the first n fields '
-                   'where n is the number of entries provided here'),
-
-    OptparseOption(Parameter=cmd_in_lookup('output_as_json'),
-                   Type=None,
-                   Default=not HAVE_H5PY,
-                   Action='store_true'),
-
-    OptparseOption(Parameter=None, Type='new_filepath', ShortName='o',
-                   Name='output-fp', Required=True,
-                   Help='the output BIOM table')
-]
-
-outputs = [
-    OptparseResult(Parameter=cmd_out_lookup('table'),
-                   Handler=write_biom_table,
-                   InputName='output-fp')
-]
diff --git a/biom/interfaces/optparse/config/convert.py b/biom/interfaces/optparse/config/convert.py
deleted file mode 100644
index ab33097..0000000
--- a/biom/interfaces/optparse/config/convert.py
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env python
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from pyqi.core.interfaces.optparse import (OptparseUsageExample,
-                                           OptparseOption, OptparseResult)
-from pyqi.core.command import (make_command_in_collection_lookup_f,
-                               make_command_out_collection_lookup_f)
-from biom.interfaces.optparse.input_handler import (load_biom_table,
-                                                    load_metadata)
-from biom.interfaces.optparse.output_handler import write_biom_table
-from biom.commands.table_converter import CommandConstructor
-
-__author__ = "Jai Ram Rideout"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Jai Ram Rideout", "Greg Caporaso", "Daniel McDonald"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Jai Ram Rideout"
-__email__ = "jai.rideout at gmail.com"
-
-cmd_in_lookup = make_command_in_collection_lookup_f(CommandConstructor)
-cmd_out_lookup = make_command_out_collection_lookup_f(CommandConstructor)
-
-usage_examples = [
-    OptparseUsageExample(ShortDesc="Converting from classic to BIOM format",
-                         LongDesc="Convert the classic file table.txt to a "
-                                  "HDF5 BIOM format OTU table",
-                         Ex='%prog -i table.txt -o table.biom '
-                            '--table-type "OTU table" --to-hdf5')
-]
-
-inputs = [
-    OptparseOption(Parameter=cmd_in_lookup('table'),
-                   Type='existing_filepath',
-                   Handler=load_biom_table,
-                   ShortName='i', Name='input-fp',
-                   Help='the input table filepath, either in BIOM or classic '
-                   'format'),
-    OptparseOption(Parameter=cmd_in_lookup('sample_metadata'),
-                   Type='existing_filepath',
-                   Handler=load_metadata,
-                   ShortName='m',
-                   Name='sample-metadata-fp'),
-    OptparseOption(Parameter=cmd_in_lookup('observation_metadata'),
-                   Type='existing_filepath',
-                   Handler=load_metadata, Name='observation-metadata-fp'),
-    OptparseOption(Parameter=cmd_in_lookup('header_key')),
-    OptparseOption(Parameter=cmd_in_lookup('output_metadata_id')),
-    OptparseOption(Parameter=cmd_in_lookup('process_obs_metadata')),
-    OptparseOption(Parameter=cmd_in_lookup('table_type')),
-    OptparseOption(Parameter=cmd_in_lookup('tsv_metadata_formatter')),
-    OptparseOption(Parameter=cmd_in_lookup('to_json'),
-                   Action='store_true'),
-    OptparseOption(Parameter=cmd_in_lookup('to_tsv'),
-                   Action='store_true'),
-    OptparseOption(Parameter=cmd_in_lookup('to_hdf5'),
-                   Action='store_true'),
-    OptparseOption(Parameter=cmd_in_lookup('collapsed_samples'),
-                   Action='store_true'),
-    OptparseOption(Parameter=cmd_in_lookup('collapsed_observations'),
-                   Action='store_true'),
-    OptparseOption(Parameter=None,
-                   Type='new_filepath',
-                   ShortName='o',
-                   Name='output-fp',
-                   Required=True,
-                   Help='the output filepath')
-]
-
-outputs = [
-    OptparseResult(Parameter=cmd_out_lookup('table'),
-                   Handler=write_biom_table,
-                   InputName='output-fp')
-]
diff --git a/biom/interfaces/optparse/config/normalize_table.py b/biom/interfaces/optparse/config/normalize_table.py
deleted file mode 100755
index 2aec86d..0000000
--- a/biom/interfaces/optparse/config/normalize_table.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env python
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from pyqi.core.interfaces.optparse import (OptparseOption,
-                                           OptparseUsageExample,
-                                           OptparseResult)
-from pyqi.core.command import (make_command_in_collection_lookup_f,
-                               make_command_out_collection_lookup_f)
-from biom.interfaces.optparse.output_handler import write_biom_table
-from biom.commands.table_normalizer import CommandConstructor
-
-__author__ = "Michael Shaffer"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Michael Shaffer"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Michael Shaffer"
-__email__ = "michael.shaffer at ucdenver.edu"
-
-cmd_in_lookup = make_command_in_collection_lookup_f(CommandConstructor)
-cmd_out_lookup = make_command_out_collection_lookup_f(CommandConstructor)
-
-
-usage_examples = [
-    OptparseUsageExample(ShortDesc="Normalizing a BIOM table to relative"
-                                   "abundnace",
-                         LongDesc="Take a BIOM table and replace all values "
-                                  "with their relative abundance in relation "
-                                  "to the sample",
-                         Ex="%prog -i table.biom -r -o "
-                            "normalized_table.biom"),
-    OptparseUsageExample(ShortDesc="Converting a BIOM table to a "
-                                   "presence/absence table",
-                         LongDesc="Take a BIOM table and convert the values "
-                                  "to 0's and 1's based on presensce or "
-                                  "absence of observations",
-                         Ex="%prog -i table.biom -p -o converted_table.biom")
-]
-
-inputs = [
-    # table input
-    OptparseOption(Parameter=cmd_in_lookup('biom_table'),
-                   Type='existing_filepath',
-                   Handler=None, ShortName='i',
-                   Name='input-fp', Required=True,
-                   Help='the input BIOM table filepath to normalize'),
-
-    # normalization to relative_abundance
-    OptparseOption(Parameter=cmd_in_lookup('relative_abund'),
-                   ShortName='r',
-                   Action='store_true',
-                   Help='convert table to relative abundance'),
-
-    # conversion to presence/absence
-    OptparseOption(Parameter=cmd_in_lookup('presence_absence'),
-                   ShortName='p',
-                   Action='store_true',
-                   Help='convert table to presence/absence'),
-
-    # if relative abundance then normalize by sample or by observation
-    OptparseOption(Parameter=cmd_in_lookup('axis'),
-                   ShortName='a',
-                   Default='sample'),
-
-    OptparseOption(Parameter=None,
-                   Type='new_filepath',
-                   ShortName='o',
-                   Name='output-fp',
-                   Required=True,
-                   Help='the output filepath')
-]
-
-outputs = [
-    OptparseResult(Parameter=cmd_out_lookup('table'),
-                   Handler=write_biom_table,
-                   InputName='output-fp')
-]
diff --git a/biom/interfaces/optparse/config/show_install_info.py b/biom/interfaces/optparse/config/show_install_info.py
deleted file mode 100644
index 88bbaa7..0000000
--- a/biom/interfaces/optparse/config/show_install_info.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env python
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from pyqi.core.interfaces.optparse import (OptparseUsageExample,
-                                           OptparseResult)
-from pyqi.core.command import make_command_out_collection_lookup_f
-from pyqi.core.interfaces.optparse.output_handler import print_list_of_strings
-from biom.commands.installation_informer import CommandConstructor
-
-__author__ = "Jai Ram Rideout"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Jai Ram Rideout", "Greg Caporaso", "Daniel McDonald"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Jai Ram Rideout"
-__email__ = "jai.rideout at gmail.com"
-
-cmd_out_lookup = make_command_out_collection_lookup_f(CommandConstructor)
-
-usage_examples = [
-    OptparseUsageExample(ShortDesc="Displaying installation info",
-                         LongDesc="Display biom-format installation "
-                                  "information",
-                         Ex="%prog")
-]
-
-inputs = []
-
-outputs = [
-    OptparseResult(Parameter=cmd_out_lookup('install_info_lines'),
-                   Handler=print_list_of_strings)
-]
diff --git a/biom/interfaces/optparse/config/subset_table.py b/biom/interfaces/optparse/config/subset_table.py
deleted file mode 100644
index bd2e94a..0000000
--- a/biom/interfaces/optparse/config/subset_table.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from pyqi.core.interfaces.optparse import (OptparseOption,
-                                           OptparseUsageExample,
-                                           OptparseResult)
-from pyqi.core.command import (make_command_in_collection_lookup_f,
-                               make_command_out_collection_lookup_f)
-from pyqi.core.interfaces.optparse.input_handler import load_file_lines
-from biom.interfaces.optparse.input_handler import biom_load_file_contents
-from biom.interfaces.optparse.output_handler import write_subsetted_biom_table
-from biom.commands.table_subsetter import CommandConstructor
-
-__author__ = "Jai Ram Rideout"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Jai Ram Rideout", "Daniel McDonald",
-               "Jose Antonio Navas Molina"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Jai Ram Rideout"
-__email__ = "jai.rideout at gmail.com"
-
-cmd_in_lookup = make_command_in_collection_lookup_f(CommandConstructor)
-cmd_out_lookup = make_command_out_collection_lookup_f(CommandConstructor)
-
-
-usage_examples = [
-    OptparseUsageExample(ShortDesc="Subsetting a json BIOM table",
-                         LongDesc="Choose a subset of the observations in "
-                                  "table.biom and write them to subset.biom",
-                         Ex="%prog -j table.biom -a observations -s "
-                            "observation_ids.txt -o subset.biom"),
-    OptparseUsageExample(ShortDesc="Subsetting a hdf5 BIOM table",
-                         LongDesc="Choose a subset of the observations in "
-                                  "table.biom and write them to subset.biom",
-                         Ex="%prog -i table.biom -a observations -s "
-                            "observation_ids.txt -o subset.biom")
-]
-
-inputs = [
-    OptparseOption(Parameter=cmd_in_lookup('hdf5_table'),
-                   Type='existing_filepath',
-                   Handler=None, ShortName='i',
-                   Name='input-hdf5-fp',
-                   Help='the input hdf5 BIOM table filepath to subset'),
-
-    OptparseOption(Parameter=cmd_in_lookup('json_table_str'),
-                   Type='existing_filepath',
-                   Handler=biom_load_file_contents, ShortName='j',
-                   Name='input-json-fp',
-                   Help='the input json BIOM table filepath to subset'),
-
-    OptparseOption(Parameter=cmd_in_lookup('axis'), ShortName='a'),
-
-    OptparseOption(Parameter=cmd_in_lookup('ids'),
-                   Type='existing_filepath', Handler=load_file_lines,
-                   ShortName='s', Help='a file containing a single column of '
-                   'IDs to retain (either sample IDs or observation IDs, '
-                   'depending on the axis)'),
-
-    OptparseOption(Parameter=None, Type='new_filepath', ShortName='o',
-                   Name='output-fp', Required=True,
-                   Help='the output BIOM table filepath'),
-]
-
-outputs = [
-    OptparseResult(Parameter=cmd_out_lookup('subsetted_table'),
-                   Handler=write_subsetted_biom_table,
-                   InputName='output-fp')
-]
diff --git a/biom/interfaces/optparse/config/summarize_table.py b/biom/interfaces/optparse/config/summarize_table.py
deleted file mode 100644
index f3538b5..0000000
--- a/biom/interfaces/optparse/config/summarize_table.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env python
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from pyqi.core.command import (make_command_in_collection_lookup_f,
-                               make_command_out_collection_lookup_f)
-from pyqi.core.interfaces.optparse import (OptparseOption,
-                                           OptparseUsageExample,
-                                           OptparseResult)
-from pyqi.core.interfaces.optparse.output_handler import (
-    write_or_print_list_of_strings)
-from biom.commands.table_summarizer import CommandConstructor
-from biom.interfaces.optparse.input_handler import (
-    load_biom_table_with_file_contents)
-
-__author__ = "Greg Caporaso"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Greg Caporaso", "Jai Ram Rideout", "Daniel McDonald"]
-__license__ = "BSD"
-__maintainer__ = "Greg Caporaso"
-__email__ = "gregcaporaso at gmail.com"
-
-cmd_in_lookup = make_command_in_collection_lookup_f(CommandConstructor)
-cmd_out_lookup = make_command_out_collection_lookup_f(CommandConstructor)
-
-usage_examples = [
-    OptparseUsageExample(ShortDesc="Basic script usage",
-                         LongDesc="Write a summary of table.biom to "
-                                  "table_summary.txt",
-                         Ex="%prog -i table.biom -o table_summary.txt")
-]
-
-inputs = [
-    OptparseOption(Parameter=cmd_in_lookup('table'),
-                   Type="existing_filepath",
-                   Handler=load_biom_table_with_file_contents,
-                   ShortName='i',
-                   Name='input-fp'),
-    OptparseOption(Parameter=cmd_in_lookup('qualitative'),
-                   Type=None,
-                   Action="store_true"),
-    OptparseOption(Parameter=cmd_in_lookup('observations'),
-                   Type=None,
-                   Action='store_true'),
-    OptparseOption(Parameter=None,
-                   Type='new_filepath',
-                   ShortName='o',
-                   Name='output-fp',
-                   Required=False,
-                   Default=None,
-                   Help='the output filepath')
-]
-
-outputs = [
-    OptparseResult(Parameter=cmd_out_lookup('biom_summary'),
-                   Handler=write_or_print_list_of_strings,
-                   InputName='output-fp')
-]
diff --git a/biom/interfaces/optparse/config/validate_table.py b/biom/interfaces/optparse/config/validate_table.py
deleted file mode 100644
index 170f2f3..0000000
--- a/biom/interfaces/optparse/config/validate_table.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*
-# ----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-import sys
-from pyqi.core.interfaces.optparse import (OptparseOption,
-                                           OptparseUsageExample,
-                                           OptparseResult)
-from pyqi.core.command import (make_command_in_collection_lookup_f,
-                               make_command_out_collection_lookup_f)
-from pyqi.core.interfaces.optparse.output_handler import print_list_of_strings
-from biom.commands.table_validator import CommandConstructor
-
-__author__ = "Jai Ram Rideout"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Jai Ram Rideout", "Daniel McDonald", "Jorge Cañardo Alastuey"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Jai Ram Rideout"
-__email__ = "jai.rideout at gmail.com"
-
-cmd_in_lookup = make_command_in_collection_lookup_f(CommandConstructor)
-cmd_out_lookup = make_command_out_collection_lookup_f(CommandConstructor)
-
-
-def report_table_validity(result_key, data, option_value=None):
-    if data:
-        print "The input file is a valid BIOM-formatted file."
-        sys.exit(0)
-    else:
-        print "The input file is not a valid BIOM-formatted file."
-        sys.exit(1)
-
-usage_examples = [
-    OptparseUsageExample(ShortDesc="Validating a BIOM file",
-                         LongDesc="Validate the contents of table.biom for "
-                                  "adherence to the BIOM format specification",
-                         Ex="%prog -i table.biom")
-]
-
-inputs = [
-    OptparseOption(Parameter=cmd_in_lookup('table'),
-                   Type='existing_filepath',
-                   Handler=None, ShortName='i',
-                   Name='input-fp',
-                   Help='the input filepath to validate against the BIOM '
-                   'format specification'),
-    OptparseOption(Parameter=cmd_in_lookup('format_version'), ShortName='f'),
-
-    OptparseOption(Parameter=cmd_in_lookup('detailed_report'), Type=None,
-                   Action='store_true')
-]
-
-outputs = [
-    OptparseResult(Parameter=cmd_out_lookup('report_lines'),
-                   Handler=print_list_of_strings),
-    OptparseResult(Parameter=cmd_out_lookup('valid_table'),
-                   Handler=report_table_validity)
-]
diff --git a/biom/interfaces/optparse/input_handler.py b/biom/interfaces/optparse/input_handler.py
deleted file mode 100644
index 0945a71..0000000
--- a/biom/interfaces/optparse/input_handler.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env python
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-import json
-from biom.util import biom_open
-from biom.parse import MetadataMap, parse_biom_table
-from pyqi.core.interfaces.optparse.input_handler import load_file_contents
-
-__author__ = "Greg Caporaso"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Greg Caporaso", "Jai Ram Rideout"]
-__license__ = "BSD"
-__maintainer__ = "Greg Caporaso"
-__email__ = "gregcaporaso at gmail.com"
-
-
-def biom_load_file_contents(fp):
-    if fp is None:
-        return fp
-    return load_file_contents(fp)
-
-
-def load_hdf5_or_json(fp):
-    """Return a parsed JSON object or an HDF5 object"""
-    with biom_open(fp) as f:
-        if hasattr(f, 'seek'):
-            return json.load(f)
-        else:
-            return f
-
-
-def load_biom_table(biom_fp):
-    """Return a parsed BIOM table."""
-    with biom_open(biom_fp, 'U') as table_f:
-        return parse_biom_table(table_f)
-
-
-def load_biom_table_with_file_contents(biom_fp):
-    """Return a BIOM table and the original open filehandle as a tuple.
-
-    Useful when additional computation needs to be performed on the file
-    contents, such as an MD5 sum.
-
-    WARNING: this function does not close the open filehandle that it returns.
-    Users of this function are responsible for closing the filehandle when done
-    using it!
-    """
-    with biom_open(biom_fp, 'U') as biom_f:
-        table = parse_biom_table(biom_f)
-
-        if hasattr(biom_f, 'seek'):
-            biom_f.seek(0)
-
-        return table, biom_f
-
-
-def load_json_document(fp):
-    """Return a parsed JSON object."""
-    with biom_open(fp, 'U') as f:
-        return json.load(f)
-
-
-def load_metadata(fp):
-    """Parse a sample/observation metadata file, return a ``MetadataMap``.
-
-    If ``fp`` is ``None``, this function will return ``None``.
-    """
-    if fp is None:
-        return None
-    else:
-        with open(fp, 'U') as f:
-            return MetadataMap.from_file(f)
diff --git a/biom/interfaces/optparse/output_handler.py b/biom/interfaces/optparse/output_handler.py
deleted file mode 100644
index 3d1370d..0000000
--- a/biom/interfaces/optparse/output_handler.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env python
-
-# ----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# ----------------------------------------------------------------------------
-
-from os.path import exists
-from pyqi.core.exception import IncompetentDeveloperError
-from pyqi.core.interfaces.optparse.output_handler import write_list_of_strings
-from biom.parse import generatedby
-from biom.util import HAVE_H5PY
-
-__author__ = "Greg Caporaso"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Greg Caporaso", "Jai Ram Rideout"]
-__license__ = "BSD"
-__maintainer__ = "Greg Caporaso"
-__email__ = "gregcaporaso at gmail.com"
-
-
-def write_subsetted_biom_table(result_key, data, option_value=None):
-    """Write a string to a file"""
-    if option_value is None:
-        raise IncompetentDeveloperError("Cannot write output without a "
-                                        "filepath.")
-
-    if exists(option_value):
-        raise IOError("Output path '%s' already exists." % option_value)
-
-    table, fmt = data
-
-    if fmt not in ['hdf5', 'json']:
-        raise IncompetentDeveloperError("Unknown file format")
-
-    if fmt == 'json':
-        write_list_of_strings(result_key, table, option_value)
-    else:
-        if HAVE_H5PY:
-            import h5py
-        else:
-            # This should never be raised here
-            raise ImportError("h5py is not available, cannot write HDF5!")
-
-        with h5py.File(option_value, 'w') as f:
-            table.to_hdf5(f, generatedby())
-
-
-def write_biom_table(result_key, data, option_value=None):
-    """Write a string to a file"""
-    if option_value is None:
-        raise IncompetentDeveloperError("Cannot write output without a "
-                                        "filepath.")
-
-    if exists(option_value):
-        raise IOError("Output path '%s' already exists." % option_value)
-
-    table, fmt = data
-
-    if fmt not in ['hdf5', 'json', 'tsv']:
-        raise IncompetentDeveloperError("Unknown file format")
-
-    if fmt == 'hdf5' and not HAVE_H5PY:
-        fmt = 'json'
-
-    if fmt == 'json':
-        with open(option_value, 'w') as f:
-            f.write(table.to_json(generatedby()))
-    elif fmt == 'tsv':
-        with open(option_value, 'w') as f:
-            f.write(table)
-            f.write('\n')
-    else:
-        import h5py
-
-        with h5py.File(option_value, 'w') as f:
-            table.to_hdf5(f, generatedby())
diff --git a/biom/parse.py b/biom/parse.py
index 9a732b0..7cbb243 100644
--- a/biom/parse.py
+++ b/biom/parse.py
@@ -9,12 +9,15 @@
 # ----------------------------------------------------------------------------
 
 from __future__ import division
-from string import maketrans
+
 import numpy as np
+from future.utils import string_types
+
 from biom.exception import BiomParseException, UnknownAxisError
 from biom.table import Table
 from biom.util import biom_open, __version__
 import json
+import collections
 
 __author__ = "Justin Kuczynski"
 __copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
@@ -26,8 +29,8 @@ __url__ = "http://biom-format.org"
 __maintainer__ = "Daniel McDonald"
 __email__ = "daniel.mcdonald at colorado.edu"
 
-MATRIX_ELEMENT_TYPE = {'int': int, 'float': float, 'unicode': unicode,
-                       u'int': int, u'float': float, u'unicode': unicode}
+MATRIX_ELEMENT_TYPE = {'int': int, 'float': float, 'unicode': str,
+                       u'int': int, u'float': float, u'unicode': str}
 
 QUOTE = '"'
 JSON_OPEN = set(["[", "{"])
@@ -124,7 +127,7 @@ def direct_slice_data(biom_str, to_keep, axis):
 
     # determine shape
     raw_shape = shape_kv_pair.split(':')[-1].replace("[", "").replace("]", "")
-    n_rows, n_cols = map(int, raw_shape.split(","))
+    n_rows, n_cols = list(map(int, raw_shape.split(",")))
 
     # slice to just data
     data_start = data_fields.find('[') + 1
@@ -163,13 +166,13 @@ def strip_f(x):
 
 def _remap_axis_sparse_obs(rcv, lookup):
     """Remap a sparse observation axis"""
-    row, col, value = map(strip_f, rcv.split(','))
+    row, col, value = list(map(strip_f, rcv.split(',')))
     return "%s,%s,%s" % (lookup[row], col, value)
 
 
 def _remap_axis_sparse_samp(rcv, lookup):
     """Remap a sparse sample axis"""
-    row, col, value = map(strip_f, rcv.split(','))
+    row, col, value = list(map(strip_f, rcv.split(',')))
     return "%s,%s,%s" % (row, lookup[col], value)
 
 
@@ -245,6 +248,98 @@ def get_axis_indices(biom_str, to_keep, axis):
     return idxs, json.dumps(subset)[1:-1]  # trim off { and }
 
 
+def parse_uc(fh):
+    """ Create a Table object from a uclust/usearch/vsearch uc file.
+
+        Parameters
+        ----------
+        fh : file handle
+            The ``.uc`` file to be parsed.
+
+        Returns
+        -------
+        biom.Table : The resulting BIOM table.
+
+        Raises
+        ------
+        ValueError
+            If a sequence identifier is encountered that doesn't have at least
+            one underscore in it (see Notes).
+
+        Notes
+        -----
+        This function assumes sequence identifiers in this file are in QIIME's
+        "post-split-libraries" format, where the identifiers are of the form
+        ``<sample-id>_<sequence-id>``. Everything before the first underscore
+        will be used as the sample identifier in the resulting ``Table``.
+        The information after the first underscore is not used directly, though
+        the full identifiers of seeds will be used as the observation
+        identifiers in the resulting ``Table``.
+
+    """
+    data = collections.defaultdict(int)
+    sample_idxs = {}
+    sample_ids = []
+    observation_idxs = {}
+    observation_ids = []
+    # The types of hit lines we need here are hit (H), seed (S) and
+    # library seed (L). Store these in a set for quick reference.
+    line_types = set('HSL')
+    for line in fh:
+        # determine if the current line is one that we need
+        line = line.strip()
+        if not line:
+            continue
+        fields = line.split('\t')
+
+        line_type = fields[0]
+        if line_type not in line_types:
+            continue
+
+        # grab the fields we care about
+        observation_id = fields[9].split()[0]
+        query_id = fields[8].split()[0]
+
+        if observation_id == '*':
+            # S and L lines don't have a separate observation id
+            observation_id = query_id
+
+        # get the index of the current observation id, or create it if it's
+        # the first time we're seeing this id
+        if observation_id in observation_idxs:
+            observation_idx = observation_idxs[observation_id]
+        else:
+            observation_idx = len(observation_ids)
+            observation_ids.append(observation_id)
+            observation_idxs[observation_id] = observation_idx
+
+        if line_type == 'H' or line_type == 'S':
+            # get the sample id
+            try:
+                underscore_index = query_id.index('_')
+            except ValueError:
+                raise ValueError(
+                 "A query sequence was encountered that does not have an "
+                 "underscore. An underscore is required in all query "
+                 "sequence identifiers to indicate the sample identifier.")
+            # get the sample id and its index, creating the index if it is the
+            # first time we're seeing this id
+            sample_id = query_id[:underscore_index]
+            if sample_id in sample_idxs:
+                sample_idx = sample_idxs[sample_id]
+            else:
+                sample_idx = len(sample_ids)
+                sample_idxs[sample_id] = sample_idx
+                sample_ids.append(sample_id)
+            # increment the count of the current observation in the current
+            # sample by one.
+            data[(observation_idx, sample_idx)] += 1
+        else:
+            # nothing else needs to be done for 'L' records
+            pass
+    return Table(data, observation_ids=observation_ids, sample_ids=sample_ids)
+
+
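
A brief usage sketch for parse_uc may help here. The in-memory uc fragment, the sample data, and the resulting IDs below are hypothetical; only the H/S/L record handling described in the docstring above is assumed (Python 3 semantics).

    # Minimal sketch: parse a two-record uc fragment held in memory.
    from io import StringIO
    from biom.parse import parse_uc

    uc = StringIO(
        "S\t0\t120\t*\t*\t*\t*\t*\tsampleA_1\t*\n"             # seed record
        "H\t0\t120\t99.2\t+\t0\t0\t*\tsampleB_7\tsampleA_1\n"  # hit record
    )
    table = parse_uc(uc)
    # Sample IDs come from the text before the first underscore: sampleA, sampleB.
    # The single observation ID is the seed's full identifier: sampleA_1.
    print(table.ids())
    print(table.ids(axis='observation'))
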
 def parse_biom_table(fp, ids=None, axis='sample', input_is_dense=False):
     r"""Parses the biom table stored in the filepath `fp`
 
@@ -423,7 +518,7 @@ class MetadataMap(dict):
                     comments.append(line)
             else:
                 # Will add empty string to empty fields
-                tmp_line = map(strip_f, line.split('\t'))
+                tmp_line = list(map(strip_f, line.split('\t')))
                 if len(tmp_line) < len(header):
                     tmp_line.extend([''] * (len(header) - len(tmp_line)))
                 mapping_data.append(tmp_line)
@@ -487,11 +582,10 @@ def biom_meta_to_string(metadata, replace_str=':'):
     # Note that since ';' and '|' are used as separators we must replace them
     # if they exist
 
-    # metadata is just a string (not a list)
-    if isinstance(metadata, str) or isinstance(metadata, unicode):
+    if isinstance(metadata, string_types):
         return metadata.replace(';', replace_str)
     elif isinstance(metadata, list):
-        transtab = maketrans(';|', ''.join([replace_str, replace_str]))
+        transtab = bytes.maketrans(';|', ''.join([replace_str, replace_str]))
         # metadata is list of lists
         if isinstance(metadata[0], list):
             new_metadata = []
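
A short illustration of the separator replacement used for list metadata may be useful at this point. On Python 3, str.maketrans builds a table that str.translate accepts (bytes.maketrans, as used above, expects bytes arguments); the taxonomy strings below are hypothetical and this is a sketch of the idea, not part of the patch.

    # Build a translation table that maps both ';' and '|' to the replacement.
    replace_str = ':'
    transtab = str.maketrans(';|', replace_str * 2)

    taxonomy = ['k__Bacteria; p__Firmicutes', 'c__Bacilli|c__Clostridia']
    print([t.translate(transtab) for t in taxonomy])
    # ['k__Bacteria: p__Firmicutes', 'c__Bacilli:c__Clostridia']
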
diff --git a/biom/table.py b/biom/table.py
index 04b0946..8caac29 100644
--- a/biom/table.py
+++ b/biom/table.py
@@ -179,16 +179,18 @@ from datetime import datetime
 from json import dumps
 from functools import reduce
 from operator import itemgetter, add
-from itertools import izip
+from future.builtins import zip
+from future.utils import viewitems
 from collections import defaultdict, Hashable, Iterable
 from numpy import ndarray, asarray, zeros, newaxis
 from scipy.sparse import coo_matrix, csc_matrix, csr_matrix, isspmatrix, vstack
 
+from future.utils import string_types
 from biom.exception import TableException, UnknownAxisError, UnknownIDError
 from biom.util import (get_biom_format_version_string,
                        get_biom_format_url_string, flatten, natsort,
                        prefer_self, index_list, H5PY_VLEN_STR, HAVE_H5PY,
-                       H5PY_VLEN_UNICODE, __format_version__)
+                       __format_version__)
 from biom.err import errcheck
 from ._filter import _filter
 from ._transform import _transform
@@ -207,8 +209,8 @@ __maintainer__ = "Daniel McDonald"
 __email__ = "daniel.mcdonald at colorado.edu"
 
 
-MATRIX_ELEMENT_TYPE = {'int': int, 'float': float, 'unicode': unicode,
-                       u'int': int, u'float': float, u'unicode': unicode}
+MATRIX_ELEMENT_TYPE = {'int': int, 'float': float, 'unicode': str,
+                       u'int': int, u'float': float, u'unicode': str}
 
 
 def general_parser(x):
@@ -217,7 +219,13 @@ def general_parser(x):
 
 def vlen_list_of_str_parser(value):
     """Parses the taxonomy value"""
-    new_value = [v for v in value if v]
+    new_value = []
+    for v in value:
+        if v:
+            if isinstance(v, bytes):
+                v = v.decode('utf8')
+            new_value.append(v)
+
     return new_value if new_value else None
 
 
@@ -226,15 +234,10 @@ def general_formatter(grp, header, md, compression):
     test_val = md[0][header]
     shape = (len(md),)
     name = 'metadata/%s' % header
-    if isinstance(test_val, unicode):
-        grp.create_dataset(name, shape=shape,
-                           dtype=H5PY_VLEN_UNICODE,
-                           compression=compression)
-        grp[name][:] = [m[header] for m in md]
-    elif isinstance(test_val, str):
+    if isinstance(test_val, string_types):
         grp.create_dataset(name, shape=shape,
                            dtype=H5PY_VLEN_STR,
-                           data=[m[header] for m in md],
+                           data=[m[header].encode('utf8') for m in md],
                            compression=compression)
     else:
         grp.create_dataset(
@@ -271,7 +274,7 @@ def vlen_list_of_str_formatter(grp, header, md, compression):
         if m[header] is None:
             continue
         value = np.asarray(m[header])
-        data[i, :len(value)] = value
+        data[i, :len(value)] = [v.encode('utf8') for v in value]
     # Change the None entries on data to empty strings ""
     data = np.where(data == np.array(None), "", data)
     grp.create_dataset(
@@ -581,7 +584,7 @@ class Table(object):
         """
         metadata = self.metadata(axis=axis)
         if metadata is not None:
-            for id_, md_entry in md.iteritems():
+            for id_, md_entry in viewitems(md):
                 if self.exists(id_, axis=axis):
                     idx = self.index(id_, axis=axis)
                     metadata[idx].update(md_entry)
@@ -835,6 +838,65 @@ class Table(object):
                               self.ids()[:], self.ids(axis='observation')[:],
                               sample_md_copy, obs_md_copy, self.table_id)
 
+    def head(self, n=5, m=5):
+        """Get the first n rows and m columns from self
+
+        Parameters
+        ----------
+        n : int, optional
+            The number of rows (observations) to get. This number must be
+            greater than 0. If not specified, 5 rows will be retrieved.
+
+        m : int, optional
+            The number of columns (samples) to get. This number must be
+            greater than 0. If not specified, 5 columns will be
+            retrieved.
+
+        Notes
+        -----
+        Like `head` on Linux-like systems, requesting more rows (or columns)
+        than exist will silently work.
+
+        Raises
+        ------
+        IndexError
+            If `n` or `m` are <= 0.
+
+        Returns
+        -------
+        Table
+            The subset table.
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> from biom.table import Table
+        >>> data = np.arange(100).reshape(5, 20)
+        >>> obs_ids = ['O%d' % i for i in range(1, 6)]
+        >>> samp_ids = ['S%d' % i for i in range(1, 21)]
+        >>> table = Table(data, obs_ids, samp_ids)
+        >>> print table.head()  # doctest: +NORMALIZE_WHITESPACE
+        # Constructed from biom file
+        #OTU ID S1  S2  S3  S4  S5
+        O1  0.0 1.0 2.0 3.0 4.0
+        O2  20.0 21.0 22.0 23.0 24.0
+        O3  40.0 41.0 42.0 43.0 44.0
+        O4  60.0 61.0 62.0 63.0 64.0
+        O5  80.0 81.0 82.0 83.0 84.0
+
+        """
+        if n <= 0:
+            raise IndexError("n cannot be <= 0.")
+
+        if m <= 0:
+            raise IndexError("m cannot be <= 0.")
+
+        row_ids = self.ids(axis='observation')[:n]
+        col_ids = self.ids(axis='sample')[:m]
+
+        table = self.filter(row_ids, axis='observation', inplace=False)
+        return table.filter(col_ids, axis='sample')
+
     def group_metadata(self, axis='sample'):
         """Return the group metadata of the given axis
 
@@ -984,13 +1046,13 @@ class Table(object):
         """
         updated_ids = zeros(self.ids(axis=axis).size, dtype=object)
         for idx, old_id in enumerate(self.ids(axis=axis)):
-            new_id = id_map.get(old_id, old_id)
-            if strict and new_id is old_id:  # same object, not just equality
+            if strict and old_id not in id_map:
                 raise TableException(
                     "Mapping not provided for %s identifier: %s. If this "
                     "identifier should not be updated, pass strict=False."
                     % (axis, old_id))
-            updated_ids[idx] = new_id
+
+            updated_ids[idx] = id_map.get(old_id, old_id)
 
         # prepare the result object and update the ids along the specified
         # axis
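
A small, hedged example of the strict behaviour shown in this hunk: when strict=True, every current ID along the chosen axis must appear as a key in id_map or TableException is raised; unmapped IDs are only kept when strict=False. The table and IDs below are hypothetical.

    import numpy as np
    from biom.table import Table

    table = Table(np.array([[1, 2], [3, 4]]), ['O1', 'O2'], ['S1', 'S2'])

    # 'S2' has no entry in the map: strict=True would raise TableException,
    # strict=False keeps the unmapped ID unchanged.
    renamed = table.update_ids({'S1': 'sample-1'}, axis='sample', strict=False)
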
@@ -1238,9 +1300,9 @@ class Table(object):
         """
         return id in self._index(axis=axis)
 
-    def delimited_self(self, delim='\t', header_key=None, header_value=None,
+    def delimited_self(self, delim=u'\t', header_key=None, header_value=None,
                        metadata_formatter=str,
-                       observation_column_name='#OTU ID'):
+                       observation_column_name=u'#OTU ID'):
         """Return self as a string in a delimited form
 
         Default str output for the Table is just row/col ids and table data
@@ -1264,11 +1326,16 @@ class Table(object):
             OTU1\t10\t2
             OTU2\t4\t8
         """
+        def to_utf8(i):
+            if isinstance(i, bytes):
+                return i.decode('utf8')
+            else:
+                return str(i)
+
         if self.is_empty():
             raise TableException("Cannot delimit self if I don't have data...")
 
-        samp_ids = delim.join(map(str, self.ids()))
-
+        samp_ids = delim.join([to_utf8(i) for i in self.ids()])
         # 17 hrs of straight programming later...
         if header_key is not None:
             if header_value is None:
@@ -1281,26 +1348,25 @@ class Table(object):
                     "You need to specify both header_key and header_value")
 
         if header_value:
-            output = ['# Constructed from biom file',
-                      '%s%s%s\t%s' % (observation_column_name, delim, samp_ids,
-                                      header_value)]
+            output = [u'# Constructed from biom file',
+                      u'%s%s%s\t%s' % (observation_column_name, delim,
+                                       samp_ids, header_value)]
         else:
             output = ['# Constructed from biom file',
                       '%s%s%s' % (observation_column_name, delim, samp_ids)]
-
         obs_metadata = self.metadata(axis='observation')
         for obs_id, obs_values in zip(self.ids(axis='observation'),
                                       self._iter_obs()):
             str_obs_vals = delim.join(map(str, self._to_dense(obs_values)))
-
+            obs_id = to_utf8(obs_id)
             if header_key and obs_metadata is not None:
                 md = obs_metadata[self._obs_index[obs_id]]
                 md_out = metadata_formatter(md.get(header_key, None))
                 output.append(
-                    '%s%s%s\t%s' %
+                    u'%s%s%s\t%s' %
                     (obs_id, delim, str_obs_vals, md_out))
             else:
-                output.append('%s%s%s' % (obs_id, delim, str_obs_vals))
+                output.append(u'%s%s%s' % (obs_id, delim, str_obs_vals))
 
         return '\n'.join(output)
 
@@ -1588,7 +1654,7 @@ class Table(object):
 
         iter_ = self.iter_data(axis=axis, dense=dense)
 
-        return izip(iter_, ids, metadata)
+        return zip(iter_, ids, metadata)
 
     def iter_pairwise(self, dense=True, axis='sample', tri=True, diag=False):
         """Pairwise iteration over self
@@ -2011,7 +2077,7 @@ class Table(object):
 
         md = self.metadata(axis=self._invert_axis(axis))
 
-        for part, (ids, values, metadata) in partitions.iteritems():
+        for part, (ids, values, metadata) in viewitems(partitions):
             if axis == 'sample':
                 data = self._conv_to_self_type(values, transpose=True)
                 samp_ids = ids
@@ -2213,12 +2279,12 @@ class Table(object):
             new_md = {}
             md_count = {}
 
-            for id_, md in izip(*axis_ids_md(self)):
+            for id_, md in zip(*axis_ids_md(self)):
                 md_iter = f(id_, md)
                 num_md = 0
                 while True:
                     try:
-                        pathway, partition = md_iter.next()
+                        pathway, partition = next(md_iter)
                     except IndexError:
                         # if a pathway is incomplete
                         if strict:
@@ -2255,7 +2321,7 @@ class Table(object):
 
                 while True:
                     try:
-                        pathway, part = md_iter.next()
+                        pathway, part = next(md_iter)
                     except IndexError:
                         # if a pathway is incomplete
                         if strict:
@@ -2276,11 +2342,11 @@ class Table(object):
 
             if include_collapsed_metadata:
                 # reassociate pathway information
-                for k, i in sorted(idx_lookup.iteritems(), key=itemgetter(1)):
+                for k, i in sorted(viewitems(idx_lookup), key=itemgetter(1)):
                     collapsed_md.append({one_to_many_md_key: new_md[k]})
 
             # get the new sample IDs
-            collapsed_ids = [k for k, i in sorted(idx_lookup.iteritems(),
+            collapsed_ids = [k for k, i in sorted(viewitems(idx_lookup),
                                                   key=itemgetter(1))]
 
             # convert back to self type
@@ -3205,6 +3271,9 @@ html
             # fetch all of the IDs
             ids = grp['ids'][:]
 
+            if ids.size > 0 and isinstance(ids[0], bytes):
+                ids = np.array([i.decode('utf8') for i in ids])
+
             parser = defaultdict(lambda: general_parser)
             parser['taxonomy'] = vlen_list_of_str_parser
             parser['KEGG_Pathways'] = vlen_list_of_str_parser
@@ -3213,10 +3282,10 @@ html
 
             # fetch ID specific metadata
             md = [{} for i in range(len(ids))]
-            for category, dset in grp['metadata'].iteritems():
+            for category, dset in viewitems(grp['metadata']):
                 parse_f = parser[category]
                 data = dset[:]
-                for md_dict, data_row in izip(md, data):
+                for md_dict, data_row in zip(md, data):
                     md_dict[category] = parse_f(data_row)
 
             # If there was no metadata on the axis, set it up as none
@@ -3253,6 +3322,7 @@ html
                     # Retrieve only the ids that we are interested on
                     ids = source_ids[idx]
                     # Check that all desired ids have been found on source ids
+
                     if ids.shape != desired_ids.shape:
                         raise ValueError("The following ids could not be "
                                          "found in the biom table: %s" %
@@ -3484,7 +3554,7 @@ html
                 # is cleaner, as is the parse
                 grp.create_dataset('ids', shape=(len_ids,),
                                    dtype=H5PY_VLEN_STR,
-                                   data=[str(i) for i in ids],
+                                   data=[i.encode('utf8') for i in ids],
                                    compression=compression)
             else:
                 # Empty H5PY_VLEN_STR datasets are not supported.
@@ -3641,29 +3711,28 @@ html
         str
             A JSON-formatted string representing the biom table
         """
-        if (not isinstance(generated_by, str) and
-                not isinstance(generated_by, unicode)):
+        if not isinstance(generated_by, string_types):
             raise TableException("Must specify a generated_by string")
 
         # Fill in top-level metadata.
         if direct_io:
-            direct_io.write('{')
-            direct_io.write('"id": "%s",' % str(self.table_id))
+            direct_io.write(u'{')
+            direct_io.write(u'"id": "%s",' % str(self.table_id))
             direct_io.write(
-                '"format": "%s",' %
+                u'"format": "%s",' %
                 get_biom_format_version_string((1, 0)))  # JSON table -> 1.0.0
             direct_io.write(
-                '"format_url": "%s",' %
+                u'"format_url": "%s",' %
                 get_biom_format_url_string())
-            direct_io.write('"generated_by": "%s",' % generated_by)
-            direct_io.write('"date": "%s",' % datetime.now().isoformat())
+            direct_io.write(u'"generated_by": "%s",' % generated_by)
+            direct_io.write(u'"date": "%s",' % datetime.now().isoformat())
         else:
-            id_ = '"id": "%s",' % str(self.table_id)
-            format_ = '"format": "%s",' % get_biom_format_version_string(
+            id_ = u'"id": "%s",' % str(self.table_id)
+            format_ = u'"format": "%s",' % get_biom_format_version_string(
                 (1, 0))  # JSON table -> 1.0.0
-            format_url = '"format_url": "%s",' % get_biom_format_url_string()
-            generated_by = '"generated_by": "%s",' % generated_by
-            date = '"date": "%s",' % datetime.now().isoformat()
+            format_url = u'"format_url": "%s",' % get_biom_format_url_string()
+            generated_by = u'"generated_by": "%s",' % generated_by
+            date = u'"date": "%s",' % datetime.now().isoformat()
 
         # Determine if we have any data in the matrix, and what the shape of
         # the matrix is.
@@ -3681,30 +3750,30 @@ html
 
         # Determine the type of elements the matrix is storing.
         if isinstance(test_element, int):
-            matrix_element_type = "int"
+            matrix_element_type = u"int"
         elif isinstance(test_element, float):
-            matrix_element_type = "float"
-        elif isinstance(test_element, unicode):
-            matrix_element_type = "unicode"
+            matrix_element_type = u"float"
+        elif isinstance(test_element, string_types):
+            matrix_element_type = u"str"
         else:
             raise TableException("Unsupported matrix data type.")
 
         # Fill in details about the matrix.
         if direct_io:
             direct_io.write(
-                '"matrix_element_type": "%s",' %
+                u'"matrix_element_type": "%s",' %
                 matrix_element_type)
-            direct_io.write('"shape": [%d, %d],' % (num_rows, num_cols))
+            direct_io.write(u'"shape": [%d, %d],' % (num_rows, num_cols))
         else:
-            matrix_element_type = '"matrix_element_type": "%s",' % \
+            matrix_element_type = u'"matrix_element_type": "%s",' % \
                 matrix_element_type
-            shape = '"shape": [%d, %d],' % (num_rows, num_cols)
+            shape = u'"shape": [%d, %d],' % (num_rows, num_cols)
 
         # Fill in the table type
         if self.type is None:
-            type_ = '"type": null,'
+            type_ = u'"type": null,'
         else:
-            type_ = '"type": "%s",' % self.type
+            type_ = u'"type": "%s",' % self.type
 
         if direct_io:
             direct_io.write(type_)
@@ -3712,24 +3781,24 @@ html
         # Fill in details about the rows in the table and fill in the matrix's
         # data. BIOM 2.0+ is now only sparse
         if direct_io:
-            direct_io.write('"matrix_type": "sparse",')
-            direct_io.write('"data": [')
+            direct_io.write(u'"matrix_type": "sparse",')
+            direct_io.write(u'"data": [')
         else:
-            matrix_type = '"matrix_type": "sparse",'
-            data = ['"data": [']
+            matrix_type = u'"matrix_type": "sparse",'
+            data = [u'"data": [']
 
         max_row_idx = len(self.ids(axis='observation')) - 1
         max_col_idx = len(self.ids()) - 1
-        rows = ['"rows": [']
+        rows = [u'"rows": [']
         have_written = False
         for obs_index, obs in enumerate(self.iter(axis='observation')):
             # i'm crying on the inside
             if obs_index != max_row_idx:
-                rows.append('{"id": %s, "metadata": %s},' % (dumps(obs[1]),
-                                                             dumps(obs[2])))
-            else:
-                rows.append('{"id": %s, "metadata": %s}],' % (dumps(obs[1]),
+                rows.append(u'{"id": %s, "metadata": %s},' % (dumps(obs[1]),
                                                               dumps(obs[2])))
+            else:
+                rows.append(u'{"id": %s, "metadata": %s}],' % (dumps(obs[1]),
+                                                               dumps(obs[2])))
 
             # turns out its a pain to figure out when to place commas. the
             # simple work around, at the expense of a little memory
@@ -3738,55 +3807,55 @@ html
             built_row = []
             for col_index, val in enumerate(obs[0]):
                 if float(val) != 0.0:
-                    built_row.append("[%d,%d,%r]" % (obs_index, col_index,
-                                                     val))
+                    built_row.append(u"[%d,%d,%r]" % (obs_index, col_index,
+                                                      val))
             if built_row:
                 # if we have written a row already, its safe to add a comma
                 if have_written:
                     if direct_io:
-                        direct_io.write(',')
+                        direct_io.write(u',')
                     else:
-                        data.append(',')
+                        data.append(u',')
                 if direct_io:
-                    direct_io.write(','.join(built_row))
+                    direct_io.write(u','.join(built_row))
                 else:
-                    data.append(','.join(built_row))
+                    data.append(u','.join(built_row))
 
                 have_written = True
 
         # finalize the data block
         if direct_io:
-            direct_io.write("],")
+            direct_io.write(u"],")
         else:
-            data.append("],")
+            data.append(u"],")
 
         # Fill in details about the columns in the table.
-        columns = ['"columns": [']
+        columns = [u'"columns": [']
         for samp_index, samp in enumerate(self.iter()):
             if samp_index != max_col_idx:
-                columns.append('{"id": %s, "metadata": %s},' % (
+                columns.append(u'{"id": %s, "metadata": %s},' % (
                     dumps(samp[1]), dumps(samp[2])))
             else:
-                columns.append('{"id": %s, "metadata": %s}]' % (
+                columns.append(u'{"id": %s, "metadata": %s}]' % (
                     dumps(samp[1]), dumps(samp[2])))
 
-        if rows[0] == '"rows": [' and len(rows) == 1:
+        if rows[0] == u'"rows": [' and len(rows) == 1:
             # empty table case
-            rows = ['"rows": [],']
-            columns = ['"columns": []']
+            rows = [u'"rows": [],']
+            columns = [u'"columns": []']
 
-        rows = ''.join(rows)
-        columns = ''.join(columns)
+        rows = u''.join(rows)
+        columns = u''.join(columns)
 
         if direct_io:
             direct_io.write(rows)
             direct_io.write(columns)
-            direct_io.write('}')
+            direct_io.write(u'}')
         else:
-            return "{%s}" % ''.join([id_, format_, format_url, matrix_type,
-                                     generated_by, date, type_,
-                                     matrix_element_type, shape,
-                                     ''.join(data), rows, columns])
+            return u"{%s}" % ''.join([id_, format_, format_url, matrix_type,
+                                      generated_by, date, type_,
+                                      matrix_element_type, shape,
+                                      u''.join(data), rows, columns])
 
     @staticmethod
     def from_tsv(lines, obs_mapping, sample_mapping,
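[Editor's aside, not part of the patch: the switch to u'' literals in the to_json hunks above matters because direct_io may be an io.StringIO or a text-mode file handle, both of which accept only unicode on Python 2. A minimal sketch of that constraint::

    import io

    buf = io.StringIO()           # rejects byte strings on Python 2
    buf.write(u'{')
    buf.write(u'"id": "%s",' % 'No Table ID')
    buf.write(u'"shape": [%d, %d]' % (2, 3))
    buf.write(u'}')
    print(buf.getvalue())         # {"id": "No Table ID","shape": [2, 3]}
]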
@@ -3964,9 +4033,9 @@ html
             obs_ids.append(fields[0])
 
             if last_column_is_numeric:
-                values = map(dtype, fields[1:])
+                values = list(map(dtype, fields[1:]))
             else:
-                values = map(dtype, fields[1:-1])
+                values = list(map(dtype, fields[1:-1]))
 
                 if md_parse is not None:
                     metadata.append(md_parse(fields[-1]))
@@ -4022,7 +4091,7 @@ html
         O1	0.0	0.0	1.0
         O2	1.0	3.0	42.0
         """
-        return self.delimited_self('\t', header_key, header_value,
+        return self.delimited_self(u'\t', header_key, header_value,
                                    metadata_formatter,
                                    observation_column_name)
 
@@ -4078,7 +4147,7 @@ def list_list_to_sparse(data, dtype=float, shape=None):
     scipy.csr_matrix
         The newly generated matrix
     """
-    rows, cols, values = izip(*data)
+    rows, cols, values = zip(*data)
 
     if shape is None:
         n_rows = max(rows) + 1
@@ -4272,7 +4341,7 @@ def dict_to_sparse(data, dtype=float, shape=None):
     rows = []
     cols = []
     vals = []
-    for (r, c), v in data.iteritems():
+    for (r, c), v in viewitems(data):
         rows.append(r)
         cols.append(c)
         vals.append(v)
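[Editor's aside, not part of the patch: the table.py hunks above are dominated by three Python 2/3 compatibility idioms: the next() builtin instead of .next(), viewitems() instead of dict.iteritems(), and an explicit list() around map() where a list is required. A minimal sketch of the same idioms, assuming the future package (added to install_requires later in this diff) supplies viewitems and string_types::

    from future.utils import viewitems, string_types

    counts = {('O1', 'S1'): 3, ('O2', 'S1'): 1}

    # dict.iteritems() is gone on Python 3; viewitems() works on both.
    for (obs, sample), value in sorted(viewitems(counts)):
        print(obs, sample, value)

    # generator.next() is gone on Python 3; the next() builtin works on both.
    pairs = iter(sorted(counts))
    first = next(pairs)

    # isinstance(x, unicode) fails on Python 3; string_types covers both.
    assert isinstance(first[0], string_types)

    # map() returns an iterator on Python 3, so materialize when a list is needed.
    values = list(map(float, ['0', '1', '42']))
]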
diff --git a/biom/util.py b/biom/util.py
index 8e73583..7ecf6df 100644
--- a/biom/util.py
+++ b/biom/util.py
@@ -9,6 +9,7 @@
 # ----------------------------------------------------------------------------
 
 import os
+import sys
 import inspect
 from contextlib import contextmanager
 
@@ -22,8 +23,13 @@ from gzip import open as gzip_open
 try:
     import h5py
     HAVE_H5PY = True
-    H5PY_VLEN_STR = h5py.special_dtype(vlen=str)
-    H5PY_VLEN_UNICODE = h5py.special_dtype(vlen=unicode)
+
+    if sys.version_info.major == 2:
+        H5PY_VLEN_STR = h5py.special_dtype(vlen=unicode)  # noqa
+        H5PY_VLEN_UNICODE = h5py.special_dtype(vlen=unicode)  # noqa
+    else:
+        H5PY_VLEN_STR = h5py.special_dtype(vlen=str)
+        H5PY_VLEN_UNICODE = h5py.special_dtype(vlen=str)
 
 except ImportError:
     HAVE_H5PY = False
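[Editor's aside, not part of the patch: the to_hdf5/from_hdf5 hunks earlier in this diff encode ids to UTF-8 on write and decode bytes on read. Combined with the version-dependent vlen dtype defined above, a hedged round-trip sketch looks roughly like the following; the file name and dataset name here are illustrative and do not reproduce the BIOM 2.x HDF5 layout::

    import h5py

    ids = [u'GG_OTU_1', u'GG_OTU_2']
    vlen_str = h5py.special_dtype(vlen=str)

    with h5py.File('example.h5', 'w') as f:
        # store unicode ids as UTF-8 encoded variable-length strings
        f.create_dataset('ids', dtype=vlen_str,
                         data=[i.encode('utf8') for i in ids])

    with h5py.File('example.h5', 'r') as f:
        raw = f['ids'][:]
        # some h5py versions hand the values back as bytes, so decode defensively
        ids_back = [i.decode('utf8') if isinstance(i, bytes) else i for i in raw]

    assert ids_back == ids
]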
@@ -41,7 +47,7 @@ __url__ = "http://biom-format.org"
 __maintainer__ = "Daniel McDonald"
 __email__ = "daniel.mcdonald at colorado.edu"
 __format_version__ = (2, 1)
-__version__ = "2.1.4"
+__version__ = "2.1.5"
 
 
 def generate_subsamples(table, n, axis='sample', by_id=False):
@@ -126,7 +132,7 @@ def unzip(items):
     BSD license).
     """
     if items:
-        return map(list, zip(*items))
+        return list(map(list, zip(*list(items))))
     else:
         return []
 
@@ -321,7 +327,7 @@ def compute_counts_per_sample_stats(table, binary_counts=False):
             sample_counts[sample_id] = (count_vector != 0).sum()
         else:
             sample_counts[sample_id] = float(count_vector.sum())
-    counts = sample_counts.values()
+    counts = list(sample_counts.values())
 
     if len(counts) == 0:
         return (0, 0, 0, 0, sample_counts)
@@ -349,7 +355,7 @@ def safe_md5(open_file, block_size=2 ** 20):
 
     # While a little hackish, this allows this code to
     # safely work either with a file object or a list of lines.
-    if isinstance(open_file, file):
+    if hasattr(open_file, 'read'):
         data_getter = open_file.read
         data_getter_i = block_size
     elif isinstance(open_file, list):
@@ -367,7 +373,7 @@ def safe_md5(open_file, block_size=2 ** 20):
     while data:
         data = data_getter(data_getter_i)
         if data:
-            result.update(data)
+            result.update(data.encode('utf-8'))
     return result.hexdigest()
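[Editor's aside, not part of the patch: the encode added above is needed because hashlib digests only accept bytes on Python 3, so text chunks must be encoded before update(). A minimal illustration::

    import hashlib

    def md5_of_lines(lines):
        # hash an iterable of text lines, encoding each chunk to bytes
        digest = hashlib.md5()
        for line in lines:
            digest.update(line.encode('utf-8'))
        return digest.hexdigest()

    print(md5_of_lines([u'a\n', u'b\n']))
]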
 
 
@@ -381,7 +387,7 @@ def is_gzip(fp):
     project, but we obtained permission from the authors of this function to
     port it to the BIOM Format project (and keep it under BIOM's BSD license).
     """
-    return open(fp, 'rb').read(2) == '\x1f\x8b'
+    return open(fp, 'rb').read(2) == b'\x1f\x8b'
 
 
 @contextmanager
@@ -504,6 +510,6 @@ def is_hdf5_file(fp):
     bool
         Whether the file is an HDF5 file
     """
-    with open(fp) as f:
+    with open(fp, 'rb') as f:
         # from the HDF5 documentation about format signature
-        return f.read(8) == '\x89HDF\r\n\x1a\n'
+        return f.read(8) == b'\x89HDF\r\n\x1a\n'
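[Editor's aside, not part of the patch: the is_gzip and is_hdf5_file fixes above follow the same pattern: open the file in binary mode and compare against bytes literals, since a text-mode read returns str on Python 3 and would never equal a magic-number signature. A hedged stand-alone version, with illustrative function names::

    def looks_like_gzip(path):
        # gzip streams begin with the two magic bytes 0x1f 0x8b
        with open(path, 'rb') as f:
            return f.read(2) == b'\x1f\x8b'

    def looks_like_hdf5(path):
        # HDF5 files begin with the 8-byte signature \x89HDF\r\n\x1a\n
        with open(path, 'rb') as f:
            return f.read(8) == b'\x89HDF\r\n\x1a\n'
]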
diff --git a/doc/conf.py b/doc/conf.py
index f26f743..b8f3658 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -66,8 +66,8 @@ copyright = u'2011-2015, The BIOM Format Development Team'
 # built documents.
 #
 # The full version, including alpha/beta/rc tags.
-version = "2.1.4"
-release = "2.1.4"
+version = "2.1.5"
+release = "2.1.5"
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
diff --git a/doc/index.rst b/doc/index.rst
index 704c2ac..92308ed 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -9,9 +9,13 @@ The `BIOM file format <http://www.biom-format.org>`_ (canonically pronounced `bi
 
 The `BIOM format <http://www.biom-format.org>`_ is designed for general use in broad areas of comparative -omics. For example, in marker-gene surveys, the primary use of this format is to represent OTU tables: the observations in this case are OTUs and the matrix contains counts corresponding to the number of times each OTU is observed in each sample. With respect to metagenome data, this format would be used to represent metagenome tables: the observations in this case might correspond  [...]
 
-There are two components to the BIOM project: first is the `definition of the BIOM format <./documentation/biom_format.html>`_, and second is `development of support objects <./documentation/table_objects.html>`_ in multiple programming languages to support the use of BIOM in diverse bioinformatics applications. The version of the BIOM file format is independent of the version of the `biom-format` software.
+The BIOM project consists of the following components:
 
-There are official implementations of BIOM format support objects (APIs) in the Python and R programming languages. The rest of this site contains details about the BIOM file format (which is independent of the API) and the Python ``biom-format`` API. For more details about the R API, please see the `CRAN biom package <http://cran.r-project.org/web/packages/biom/index.html>`_.
+* `definition of the BIOM file format <./documentation/biom_format.html>`_;
+* command line interface (CLI) for working with BIOM files, including `converting between file formats <./documentation/biom_conversion.html>`_, `adding metadata to BIOM files <./documentation/adding_metadata.html>`_, and `summarizing BIOM files <./documentation/summarizing_biom_tables.html>`_ (run ``biom`` to see the full list of commands);
+* application programming interface (API) for working with BIOM files in multiple programming languages (including Python and R).
+
+The ``biom-format`` package provides a command line interface and Python API for working with BIOM files. The rest of this site contains details about the BIOM file format (which is independent of the API) and the Python ``biom-format`` package. For more details about the R API, please see the `CRAN biom package <http://cran.r-project.org/web/packages/biom/index.html>`_.
 
 Projects using the BIOM format
 ==============================
@@ -25,6 +29,7 @@ Projects using the BIOM format
 * `VAMPS <http://vamps.mbl.edu/>`_
 * `metagenomeSeq <http://www.bioconductor.org/packages/release/bioc/html/metagenomeSeq.html>`_
 * `Phinch <http://phinch.org>`_
+* `RDP Classifier <https://github.com/rdpstaff/classifier>`_
 
 If you are using BIOM in your project, and would like your project to be listed, please submit a `pull request <https://github.com/biocore/biom-format/pulls>`_ to the BIOM project. More information on `submitting pull requests can be found here <https://help.github.com/articles/using-pull-requests>`_.
 
@@ -42,77 +47,30 @@ BIOM version
 
 The latest official version of the biom-format project is |release| and of the BIOM file format is 2.0. Details on the `file format can be found here <./documentation/biom_format.html>`_.
 
-Installing the biom-format project
-==================================
-
-To install the ``biom-format`` project, you can download the `latest version here <https://pypi.python.org/pypi/biom-format/>`_, or work with the development version. Generally we recommend working with the release version as it will be more stable, but if you want access to the latest features (and can tolerate some instability) you should work with the development version.
-
-The biom-format project has the following dependencies:
-
-	*  `Python <http://www.python.org/>`_ >= 2.7 and < 3.0
-	* `numpy <http://www.numpy.org/>`_ >= 1.7.0
-	* `pyqi <http://pyqi.readthedocs.org>`_ 0.3.2
-	* `scipy <http://www.scipy.org/>`_ >= 0.13.0 
-	* `h5py <http://www.h5py.org/>`_ >= 2.20.0 (optional; must be installed if creating or reading HDF5 formatted files)
-
-The easiest way to install the latest version of the biom-format project and its required dependencies is via pip::
-
-	pip install numpy
-	pip install biom-format
-
-That's it!
-
-If you decided not to install biom-format using pip, it is also possible to manually install the latest release. We'll illustrate the install process in the ``$HOME/code`` directory. You can either work in this directory on your system (creating it, if necessary, by running ``mkdir $HOME/code``) or replace all occurrences of ``$HOME/code`` in the following instructions with your working directory. Please note that ``numpy`` must be in your installed prior to installing ``biom-format``. C [...]
-
-	cd $HOME/code
-
-Download the `latest release, which can be found here <https://pypi.python.org/pypi/biom-format>`_. After downloading, unpack and install (note: x.y.z refers to the downloaded version)::
+Installing the ``biom-format`` Python package
+=============================================
 
-	tar xzf biom-format-x.y.z.tar.gz
-	cd $HOME/code/biom-format-x.y.z
+To install the latest release of the ``biom-format`` Python package::
 
-Alternatively, to install the development version, pull it from GitHub, and change to the resulting directory::
+    pip install numpy
+    pip install biom-format
 
-	git clone git://github.com/biocore/biom-format.git
-	cd $HOME/code/biom-format
+To work with BIOM 2.0+ files::
 
-To install (either the development or release version), follow these steps::
+    pip install h5py
 
-	sudo python setup.py install
+To see a list of all ``biom`` commands, run::
 
-If you do not have sudo access on your system (or don't want to install the ``biom-format`` project in the default location) you'll need to install the library code and scripts in specified directories, and then tell your system where to look for those files. You can do this as follows::
+    biom
 
-	echo "export PATH=$HOME/bin/:$PATH" >> $HOME/.bashrc
-	echo "export PYTHONPATH=$HOME/lib/:$PYTHONPATH" >> $HOME/.bashrc
-	mkdir -p $HOME/bin $HOME/lib/
-	source $HOME/.bashrc
-	python setup.py install --install-scripts=$HOME/bin/ --install-purelib=$HOME/lib/ --install-lib=$HOME/lib/
+To enable Bash tab completion of ``biom`` commands, add the following line to ``$HOME/.bashrc`` (if on Linux) or ``$HOME/.bash_profile`` (if on Mac OS X)::
 
-You should then have access to the biom-format project. You can test this by running the following command::
-	
-	python -c "from biom import __version__; print __version__"
+    eval "$(_BIOM_COMPLETE=source biom)"
 
-You should see the current version of the biom-format project.
-
-Next you can run::
-
-	which biom
-
-You should get a file path ending with ``biom`` printed to your screen if it is installed correctly. Finally, to see a list of all ``biom`` commands, run::
-
-	biom
-
-Enabling tab completion of biom commands
-----------------------------------------
-
-The ``biom`` command referenced in the previous section is a driver for commands in biom-format, powered by `the pyqi project <http://biocore.github.io/pyqi>`_. You can enable tab completion of biom command names and command options (meaning that when you begin typing the name of a command or option you can auto-complete it by hitting the *tab* key) by following a few simple steps from the pyqi documentation. While this step is optional, tab completion is very convenient so it's worth enabling.
-
-To enable tab completion, follow the steps outlined under `Configuring bash completion <http://biocore.github.io/pyqi/doc/tutorials/defining_your_command_driver.html#configuring-bash-completion>`_ in the pyqi install documentation, substituting ``biom`` for ``my-project`` and ``my_project`` in all commands. After completing those steps and closing and re-opening your terminal, auto-completion should be enabled.
-
-BIOM format in R
-================
+Installing the ``biom`` R package
+=================================
 
-There is also a BIOM format package for R, called ``biom``. This package includes basic tools for reading biom-format files, accessing and subsetting data tables from a biom object, as well as limited support for writing a biom-object back to a biom-format file. The design of this API is intended to match the python API and other tools included with the biom-format project, but with a decidedly "R flavor" that should be familiar to R users. This includes S4 classes and methods, as well a [...]
+There is also a BIOM format package for R called ``biom``. This package includes basic tools for reading biom-format files, accessing and subsetting data tables from a biom object, as well as limited support for writing a biom-object back to a biom-format file. The design of this API is intended to match the python API and other tools included with the biom-format project, but with a decidedly "R flavor" that should be familiar to R users. This includes S4 classes and methods, as well as [...]
 
 To install the latest stable release of the ``biom`` package enter the following command from within an R session::
 
@@ -128,7 +86,7 @@ Please post any support or feature requests and bugs to `the biom issue tracker
 
 See `the biom project on GitHub <https://github.com/joey711/biom/>`_ for further details, or if you would like to contribute.
 
-Note that the licenses between the ``biom`` R package (GPL-2) and the other biom-format software (Modified BSD) are different.
+Note that the licenses between the ``biom`` R package (GPL-2) and the ``biom-format`` Python package (Modified BSD) are different.
 
 Citing the BIOM project
 =======================
@@ -142,4 +100,4 @@ You can cite the BIOM format as follows (`link <http://www.gigasciencejournal.co
 Development team
 ================
 
-The biom-format project was conceived of and developed by the `QIIME <http://www.qiime.org>`_, `MG-RAST <http://metagenomics.anl.gov>`_, and `VAMPS <http://vamps.mbl.edu/>`_ development groups to support interoperability of our software packages. If you have questions about the biom-format project you can contact gregcaporaso at gmail.com.
+The biom-format project was conceived of and developed by the `QIIME <http://www.qiime.org>`_, `MG-RAST <http://metagenomics.anl.gov>`_, and `VAMPS <http://vamps.mbl.edu/>`_ development groups to support interoperability of our software packages. If you have questions about the biom-format project please post them on the `QIIME Forum <http://forum.qiime.org>`_.
diff --git a/scripts/biom b/scripts/biom
deleted file mode 100755
index 369c0bc..0000000
--- a/scripts/biom
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/sh
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-# __author__ = "Greg Caporaso"
-# __copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-# __credits__ = ["Daniel McDonald",
-#                "Greg Caporaso",
-#                "Jai Ram Rideout"]
-# __license__ = "BSD"
-# __url__ = "http://biom-format.org"
-# __version__ = "2.1.4"
-# __maintainer__ = "Greg Caporaso"
-# __email__ = "gregcaporaso at gmail.com"
-
-exec pyqi --driver-name biom --command-config-module biom.interfaces.optparse.config -- "$@"
diff --git a/scripts/serve-biom b/scripts/serve-biom
deleted file mode 100644
index a4ca323..0000000
--- a/scripts/serve-biom
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/sh
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-exec pyqi serve-html-interface -m biom.interfaces.html.config "$@"
diff --git a/setup.py b/setup.py
index f87da11..0734278 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@
 # -*- coding: utf-8 -*-
 
 # ----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
+# Copyright (c) 2011-2015, The BIOM Format Development Team.
 #
 # Distributed under the terms of the Modified BSD License.
 #
@@ -10,9 +10,9 @@
 # ----------------------------------------------------------------------------
 
 import os
-from setuptools import setup
+import sys
+from setuptools import setup, find_packages
 from setuptools.extension import Extension
-from glob import glob
 
 try:
     import numpy as np
@@ -35,7 +35,7 @@ __copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
 __credits__ = ["Greg Caporaso", "Daniel McDonald", "Jose Clemente",
                "Jai Ram Rideout", "Jorge Cañardo Alastuey", "Michael Hall"]
 __license__ = "BSD"
-__version__ = "2.1.4"
+__version__ = "2.1.5"
 __maintainer__ = "Daniel McDonald"
 __email__ = "mcdonadt at colorado.edu"
 
@@ -79,6 +79,13 @@ if USE_CYTHON:
     from Cython.Build import cythonize
     extensions = cythonize(extensions)
 
+install_requires = ["click", "numpy >= 1.3.0", "future >= 0.14.3",
+                    "scipy >= 0.13.0"]
+# HACK: for backward-compatibility with QIIME 1.9.x, pyqi must be installed.
+# pyqi is not used anymore in this project.
+if sys.version_info[0] < 3:
+    install_requires.append("pyqi")
+
 setup(name='biom-format',
       version=__version__,
       description='Biological Observation Matrix (BIOM) format',
@@ -90,23 +97,16 @@ setup(name='biom-format',
       maintainer_email=__email__,
       url='http://www.biom-format.org',
       test_suite='nose.collector',
-      packages=['biom',
-                'biom/commands',
-                'biom/interfaces',
-                'biom/interfaces/optparse',
-                'biom/interfaces/optparse/config',
-                'biom/interfaces/html',
-                'biom/interfaces/html/config'
-                ],
+      packages=find_packages(),
+      include_package_data=True,
       ext_modules=extensions,
       include_dirs=[np.get_include()],
-      scripts=['scripts/biom',
-               'scripts/serve-biom'],
-      install_requires=["numpy >= 1.3.0",
-                        "pyqi == 0.3.2",
-                        "scipy >= 0.13.0"],
-      extras_require={'test': ["nose >= 0.10.1", "pep8", "flake8"],
+      install_requires=install_requires,
+      extras_require={'test': ["nose >= 0.10.1", "flake8"],
                       'hdf5': ["h5py >= 2.2.0"]
                       },
-      classifiers=classifiers
-      )
+      classifiers=classifiers,
+      entry_points='''
+          [console_scripts]
+          biom=biom.cli:cli
+      ''')
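[Editor's aside, not part of the patch: the pyqi driver scripts are removed above, and the biom command is now a console_scripts entry point resolving to a click command group. A minimal, hypothetical sketch of that shape; the group, command, and option names below are illustrative and are not biom's actual CLI code::

    import click

    @click.group()
    def cli():
        """Toy command driver installed via a console_scripts entry point."""

    @cli.command()
    @click.option('-i', '--input-fp', required=True,
                  help='Path to the input table.')
    def summarize(input_fp):
        """Echo which file would be summarized."""
        click.echo('Would summarize: %s' % input_fp)

    if __name__ == '__main__':
        cli()

With an entry point such as biom=biom.cli:cli declared above, setuptools generates the biom executable at install time, and the _BIOM_COMPLETE hook shown earlier in doc/index.rst is click's standard Bash completion mechanism.]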
diff --git a/biom/interfaces/html/output_handler.py b/tests/test_cli/__init__.py
similarity index 78%
rename from biom/interfaces/html/output_handler.py
rename to tests/test_cli/__init__.py
index 63ce058..1e84e01 100644
--- a/biom/interfaces/html/output_handler.py
+++ b/tests/test_cli/__init__.py
@@ -1,7 +1,5 @@
-#!/usr/bin/env python
-
 # ----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
+# Copyright (c) 2011-2015, The BIOM Format Development Team.
 #
 # Distributed under the terms of the Modified BSD License.
 #
diff --git a/tests/test_commands/test_metadata_adder.py b/tests/test_cli/test_add_metadata.py
similarity index 80%
rename from tests/test_commands/test_metadata_adder.py
rename to tests/test_cli/test_add_metadata.py
index 9fafb29..90111d3 100644
--- a/tests/test_commands/test_metadata_adder.py
+++ b/tests/test_cli/test_add_metadata.py
@@ -1,34 +1,29 @@
 #!/usr/bin/env python
 
 # -----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
+# Copyright (c) 2011-2015, The BIOM Format Development Team.
 #
 # Distributed under the terms of the Modified BSD License.
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # -----------------------------------------------------------------------------
 
-__author__ = "Jai Ram Rideout"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Jai Ram Rideout", "Jose Antonio Navas Molina"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Jai Ram Rideout"
-__email__ = "jai.rideout at gmail.com"
-
-from pyqi.core.exception import CommandError
-from biom.commands.metadata_adder import MetadataAdder
-from biom.parse import parse_biom_table
+import tempfile
 from unittest import TestCase, main
 
+import biom
+from biom.cli.metadata_adder import _add_metadata
 
-class MetadataAdderTests(TestCase):
+
+class TestAddMetadata(TestCase):
 
     def setUp(self):
         """Set up data for use in unit tests."""
-        self.cmd = MetadataAdder()
-        self.biom_lines1 = biom1
-        self.biom_table1 = parse_biom_table(self.biom_lines1)
+        self.cmd = _add_metadata
+        with tempfile.NamedTemporaryFile('w') as fh:
+            fh.write(biom1)
+            fh.flush()
+            self.biom_table1 = biom.load_table(fh.name)
         self.sample_md_lines1 = sample_md1.split('\n')
         self.obs_md_lines1 = obs_md1.split('\n')
 
@@ -38,15 +33,12 @@ class MetadataAdderTests(TestCase):
         # sample metadata to begin with. Don't perform any casting.
         obs = self.cmd(table=self.biom_table1,
                        sample_metadata=self.sample_md_lines1)
-        self.assertEqual(obs.keys(), ['table'])
 
-        obs, fmt = obs['table']
         self.assertEqual(obs.metadata()[obs.index('f4', 'sample')],
                          {'bar': '0.23', 'foo': '9', 'baz': 'abc;123'})
         self.assertEqual(obs.metadata()[obs.index('not16S.1', 'sample')],
                          {'bar': '-4.2', 'foo': '0', 'baz': '123;abc'})
         self.assertEqual(obs.metadata()[obs.index('f2', 'sample')], {})
-        self.assertEqual(fmt, 'hdf5')
 
     def test_add_sample_metadata_with_casting(self):
         """Correctly adds sample metadata with casting."""
@@ -54,15 +46,12 @@ class MetadataAdderTests(TestCase):
                        sample_metadata=self.sample_md_lines1,
                        sc_separated=['baz'], int_fields=['foo'],
                        float_fields=['bar'])
-        self.assertEqual(obs.keys(), ['table'])
 
-        obs, fmt = obs['table']
         self.assertEqual(obs.metadata()[obs.index('f4', 'sample')],
                          {'bar': 0.23, 'foo': 9, 'baz': ['abc', '123']})
         self.assertEqual(obs.metadata()[obs.index('not16S.1', 'sample')],
                          {'bar': -4.2, 'foo': 0, 'baz': ['123', 'abc']})
         self.assertEqual(obs.metadata()[obs.index('f2', 'sample')], {})
-        self.assertEqual(fmt, 'hdf5')
 
     def test_add_observation_metadata_no_casting(self):
         """Correctly adds observation metadata without casting it."""
@@ -71,11 +60,8 @@ class MetadataAdderTests(TestCase):
         # observations that aren't in the table are included. Don't perform any
         # casting.
         obs = self.cmd(table=self.biom_table1,
-                       observation_metadata=self.obs_md_lines1,
-                       output_as_json=True)
-        self.assertEqual(obs.keys(), ['table'])
+                       observation_metadata=self.obs_md_lines1)
 
-        obs, fmt = obs['table']
         metadata = obs.metadata(axis='observation')
         self.assertEqual(
             metadata[obs.index('None7', 'observation')],
@@ -86,16 +72,13 @@ class MetadataAdderTests(TestCase):
         self.assertEqual(
             metadata[obs.index('None8', 'observation')],
             {'taxonomy': ['k__Bacteria']})
-        self.assertEqual(fmt, 'json')
 
     def test_add_observation_metadata_with_casting(self):
         """Correctly adds observation metadata with casting."""
         obs = self.cmd(table=self.biom_table1,
                        observation_metadata=self.obs_md_lines1,
                        sc_pipe_separated=['taxonomy'], int_fields=['foo'])
-        self.assertEqual(obs.keys(), ['table'])
 
-        obs, fmt = obs['table']
         metadata = obs.metadata(axis='observation')
         self.assertEqual(
             metadata[obs.index('None7', 'observation')],
@@ -106,12 +89,6 @@ class MetadataAdderTests(TestCase):
         self.assertEqual(
             metadata[obs.index('None8', 'observation')],
             {'taxonomy': ['k__Bacteria']})
-        self.assertEqual(fmt, 'hdf5')
-
-    def test_no_metadata(self):
-        """Correctly raises error when not provided any metadata."""
-        with self.assertRaises(CommandError):
-            self.cmd(table=self.biom_table1)
 
 
 biom1 = ('{"id": "None","format": "Biological Observation Matrix 1.0.0","form'
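[Editor's aside, not part of the patch: instead of pyqi command objects, the rewritten tests materialize fixtures by writing the table string to a named temporary file and reading it back with biom.load_table, the same pattern used throughout tests/test_cli. In the sketch below, a tiny classic (TSV) table stands in for the long JSON fixture for brevity::

    import tempfile

    import biom

    classic = (u"#OTU ID\tS1\tS2\n"
               u"O1\t0\t1\n"
               u"O2\t2\t3\n")

    with tempfile.NamedTemporaryFile('w', suffix='.txt') as fh:
        fh.write(classic)
        fh.flush()
        table = biom.load_table(fh.name)

    print(table.shape)  # (2, 2): two observations, two samples
]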
diff --git a/tests/test_commands/test_data/json_obs_collapsed.biom b/tests/test_cli/test_data/json_obs_collapsed.biom
similarity index 100%
rename from tests/test_commands/test_data/json_obs_collapsed.biom
rename to tests/test_cli/test_data/json_obs_collapsed.biom
diff --git a/tests/test_commands/test_data/json_sample_collapsed.biom b/tests/test_cli/test_data/json_sample_collapsed.biom
similarity index 100%
rename from tests/test_commands/test_data/json_sample_collapsed.biom
rename to tests/test_cli/test_data/json_sample_collapsed.biom
diff --git a/tests/test_commands/test_data/test.biom b/tests/test_cli/test_data/test.biom
similarity index 100%
rename from tests/test_commands/test_data/test.biom
rename to tests/test_cli/test_data/test.biom
diff --git a/tests/test_cli/test_data/test.json b/tests/test_cli/test_data/test.json
new file mode 100644
index 0000000..74cd4d3
--- /dev/null
+++ b/tests/test_cli/test_data/test.json
@@ -0,0 +1 @@
+{"id": "No Table ID","format": "Biological Observation Matrix 1.0.0","format_url": "http://biom-format.org","generated_by": "BIOM-Format 2.0.0-dev","date": "2014-06-02T10:08:43.174137", "type": "OTU table", "matrix_element_type": "float","shape": [5, 6],"data": [[0,2,1.0],[1,0,5.0],[1,1,1.0],[1,3,2.0],[1,4,3.0],[1,5,1.0],[2,2,1.0],[2,3,4.0],[2,5,2.0],[3,0,2.0],[3,1,1.0],[3,2,1.0],[3,5,1.0],[4,1,1.0],[4,2,1.0]],"rows": [{"id": "GG_OTU_1", "metadata": {"taxonomy": ["k__Bacteria", "p__Prote [...]
diff --git a/tests/test_cli/test_show_install_info.py b/tests/test_cli/test_show_install_info.py
new file mode 100644
index 0000000..35cd63a
--- /dev/null
+++ b/tests/test_cli/test_show_install_info.py
@@ -0,0 +1,24 @@
+# -----------------------------------------------------------------------------
+# Copyright (c) 2011-2015, The BIOM Format Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+
+import unittest
+
+from biom.cli.installation_informer import _show_install_info
+
+
+class TestShowInstallInfo(unittest.TestCase):
+    def test_default(self):
+        # Not really sure what to specifically test here, as this information
+        # will change on a per-install basis. Just make sure the code is being
+        # exercised and we have some output.
+        obs = _show_install_info()
+        self.assertTrue(len(obs) > 0)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/test_commands/test_table_subsetter.py b/tests/test_cli/test_subset_table.py
similarity index 65%
rename from tests/test_commands/test_table_subsetter.py
rename to tests/test_cli/test_subset_table.py
index e118c5a..f3d8265 100644
--- a/tests/test_commands/test_table_subsetter.py
+++ b/tests/test_cli/test_subset_table.py
@@ -1,43 +1,31 @@
-#!/usr/bin/env python
-
 # -----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
+# Copyright (c) 2011-2015, The BIOM Format Development Team.
 #
 # Distributed under the terms of the Modified BSD License.
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # -----------------------------------------------------------------------------
 
-__author__ = "Jai Ram Rideout"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Jai Ram Rideout"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Jai Ram Rideout"
-__email__ = "jai.rideout at gmail.com"
-
 import os
-from pyqi.core.exception import CommandError
-from biom.commands.table_subsetter import TableSubsetter
-from biom.parse import parse_biom_table
-from unittest import TestCase, main
+import unittest
+
 import numpy.testing as npt
-from biom.util import HAVE_H5PY
 
+from biom.cli.table_subsetter import _subset_table
+from biom.parse import parse_biom_table
+from biom.util import HAVE_H5PY
 
-class TableSubsetterTests(TestCase):
 
+class TestSubsetTable(unittest.TestCase):
     def setUp(self):
         """Set up data for use in unit tests."""
-        self.cmd = TableSubsetter()
         self.biom_str1 = biom1
 
     def test_subset_samples(self):
         """Correctly subsets samples in a table."""
-        obs = self.cmd(json_table_str=self.biom_str1, axis='sample',
-                       ids=['f4', 'f2'])
-        self.assertEqual(obs.keys(), ['subsetted_table'])
-        obs = parse_biom_table(list(obs['subsetted_table'][0]))
+        obs = _subset_table(json_table_str=self.biom_str1, axis='sample',
+                       ids=['f4', 'f2'], hdf5_biom=None)
+        obs = parse_biom_table(list(obs[0]))
         self.assertEqual(len(obs.ids()), 2)
         self.assertEqual(len(obs.ids(axis='observation')), 14)
         self.assertTrue('f4' in obs.ids())
@@ -45,10 +33,9 @@ class TableSubsetterTests(TestCase):
 
     def test_subset_observations(self):
         """Correctly subsets observations in a table."""
-        obs = self.cmd(json_table_str=self.biom_str1, axis='observation',
-                       ids=['None2', '879972'])
-        self.assertEqual(obs.keys(), ['subsetted_table'])
-        obs = parse_biom_table(list(obs['subsetted_table'][0]))
+        obs = _subset_table(json_table_str=self.biom_str1, axis='observation',
+                       ids=['None2', '879972'], hdf5_biom=None)
+        obs = parse_biom_table(list(obs[0]))
         self.assertEqual(len(obs.ids()), 9)
         self.assertEqual(len(obs.ids(axis='observation')), 2)
         self.assertTrue('None2' in obs.ids(axis='observation'))
@@ -56,15 +43,15 @@ class TableSubsetterTests(TestCase):
 
     def test_invalid_input(self):
         """Correctly raises politically correct error upon invalid input."""
-        with self.assertRaises(CommandError):
-            self.cmd(json_table_str=self.biom_str1, axis='foo',
+        with self.assertRaises(ValueError):
+            _subset_table(hdf5_biom=None, json_table_str=self.biom_str1, axis='foo',
                      ids=['f2', 'f4'])
 
-        with self.assertRaises(CommandError):
-            self.cmd(axis='sample', ids=['f2', 'f4'])
+        with self.assertRaises(ValueError):
+            _subset_table(hdf5_biom=None, json_table_str=None, axis='sample', ids=['f2', 'f4'])
 
-        with self.assertRaises(CommandError):
-            self.cmd(json_table_str=self.biom_str1, hdf5_table='foo',
+        with self.assertRaises(ValueError):
+            _subset_table(json_table_str=self.biom_str1, hdf5_biom='foo',
                      axis='sample', ids=['f2', 'f4'])
 
     @npt.dec.skipif(HAVE_H5PY is False, msg='H5PY is not installed')
@@ -73,16 +60,16 @@ class TableSubsetterTests(TestCase):
         cwd = os.getcwd()
         if '/' in __file__:
             os.chdir(__file__.rsplit('/', 1)[0])
-        obs = self.cmd(hdf5_table='test_data/test.biom', axis='sample',
-                       ids=['Sample1', 'Sample2', 'Sample3'])
+        obs = _subset_table(hdf5_biom='test_data/test.biom', axis='sample',
+                           ids=[u'Sample1', u'Sample2', u'Sample3'],
+                           json_table_str=None)
         os.chdir(cwd)
-        self.assertEqual(obs.keys(), ['subsetted_table'])
-        obs = obs['subsetted_table'][0]
+        obs = obs[0]
         self.assertEqual(len(obs.ids()), 3)
         self.assertEqual(len(obs.ids(axis='observation')), 5)
-        self.assertTrue('Sample1' in obs.ids())
-        self.assertTrue('Sample2' in obs.ids())
-        self.assertTrue('Sample3' in obs.ids())
+        self.assertTrue(u'Sample1' in obs.ids())
+        self.assertTrue(u'Sample2' in obs.ids())
+        self.assertTrue(u'Sample3' in obs.ids())
 
     @npt.dec.skipif(HAVE_H5PY is False, msg='H5PY is not installed')
     def test_subset_observations_hdf5(self):
@@ -90,16 +77,16 @@ class TableSubsetterTests(TestCase):
         cwd = os.getcwd()
         if '/' in __file__:
             os.chdir(__file__.rsplit('/', 1)[0])
-        obs = self.cmd(hdf5_table='test_data/test.biom', axis='observation',
-                       ids=['GG_OTU_1', 'GG_OTU_3', 'GG_OTU_5'])
+        obs = _subset_table(hdf5_biom='test_data/test.biom', axis='observation',
+                           ids=[u'GG_OTU_1', u'GG_OTU_3', u'GG_OTU_5'],
+                           json_table_str=None)
         os.chdir(cwd)
-        self.assertEqual(obs.keys(), ['subsetted_table'])
-        obs = obs['subsetted_table'][0]
+        obs = obs[0]
         self.assertEqual(len(obs.ids()), 4)
         self.assertEqual(len(obs.ids(axis='observation')), 3)
-        self.assertTrue('GG_OTU_1' in obs.ids(axis='observation'))
-        self.assertTrue('GG_OTU_3' in obs.ids(axis='observation'))
-        self.assertTrue('GG_OTU_5' in obs.ids(axis='observation'))
+        self.assertTrue(u'GG_OTU_1' in obs.ids(axis='observation'))
+        self.assertTrue(u'GG_OTU_3' in obs.ids(axis='observation'))
+        self.assertTrue(u'GG_OTU_5' in obs.ids(axis='observation'))
 
 
 biom1 = ('{"id": "None","format": "Biological Observation Matrix 1.0.0",'
@@ -131,6 +118,5 @@ biom1 = ('{"id": "None","format": "Biological Observation Matrix 1.0.0",'
          ' "p1", "metadata": null},{"id": "t1", "metadata": null},{"id": '
          '"not16S.1", "metadata": null},{"id": "t2", "metadata": null}]}')
 
-
 if __name__ == "__main__":
-    main()
+    unittest.main()
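[Editor's aside, not part of the patch: the ported tests above call the underlying helper (_subset_table) directly rather than going through the command line. Click also ships an in-process test harness, click.testing.CliRunner, which could be used to exercise the click layer itself; the toy group below is illustrative and not biom's actual CLI::

    import click
    from click.testing import CliRunner

    @click.group()
    def cli():
        """Toy command group for demonstration."""

    @cli.command()
    @click.option('--axis', type=click.Choice(['sample', 'observation']),
                  default='sample')
    def subset(axis):
        click.echo('subsetting along %s' % axis)

    runner = CliRunner()
    result = runner.invoke(cli, ['subset', '--axis', 'observation'])
    assert result.exit_code == 0
    assert 'observation' in result.output
]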
diff --git a/tests/test_cli/test_summarize_table.py b/tests/test_cli/test_summarize_table.py
new file mode 100644
index 0000000..ff85bc4
--- /dev/null
+++ b/tests/test_cli/test_summarize_table.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+
+# -----------------------------------------------------------------------------
+# Copyright (c) 2011-2015, The BIOM Format Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+
+from biom.cli.table_summarizer import _summarize_table
+from biom.parse import load_table
+
+import tempfile
+from unittest import TestCase, main
+
+
+class TestSummarizeTable(TestCase):
+
+    def setUp(self):
+        with tempfile.NamedTemporaryFile(mode='w') as fh:
+            fh.write(biom1)
+            fh.flush()
+            self.biom1 = load_table(fh.name)
+
+    def test_default(self):
+        """ TableSummarizer functions as expected
+
+        """
+        result = _summarize_table(self.biom1)
+        # test same alphanumeric content, order of samples is runtime
+        # dependent
+        self.assertEqual(sorted(result), sorted(summary_default))
+
+    def test_qualitative(self):
+        """ TableSummarizer functions as expected with qualitative=True
+
+        """
+        result = _summarize_table(self.biom1, qualitative=True)
+        # test same alphanumeric content, order of samples is runtime
+        # dependent
+        self.assertEqual(sorted(result), sorted(summary_qualitative))
+
+biom1 = ('{"id": "None","format": "Biological Observation Matrix 1.0.0",'
+    '"format_url": "http://biom-format.org","type": "OTU table",'
+    '"generated_by": "QIIME 1.6.0-dev","date": '
+    '"2013-02-09T09:30:11.550590","matrix_type": "sparse",'
+    '"matrix_element_type": "int","shape": [14, 9],"data": [[0,0,20],'
+    '[0,1,18],[0,2,18],[0,3,22],[0,4,4],[1,4,1],[2,0,1],[2,4,1],[2,5,1],'
+    '[3,6,1],[4,4,1],[5,7,20],[6,4,1],[7,4,1],[7,5,1],[8,4,1],[8,6,2],'
+    '[8,8,3],[9,7,2],[10,5,1],[11,4,9],[11,5,20],[11,6,1],[11,8,4],'
+    '[12,4,3],[12,6,19],[12,8,15],[13,0,1],[13,1,4],[13,2,4]],"rows": '
+    '[{"id": "295053", "metadata": {"taxonomy": ["k__Bacteria"]}},{"id": '
+    '"42684", "metadata": {"taxonomy": ["k__Bacteria", '
+    '"p__Proteobacteria"]}},{"id": "None11", "metadata": {"taxonomy": '
+    '["Unclassified"]}},{"id": "None10", "metadata": {"taxonomy": '
+    '["Unclassified"]}},{"id": "None7", "metadata": {"taxonomy": '
+    '["Unclassified"]}},{"id": "None6", "metadata": {"taxonomy": '
+    '["Unclassified"]}},{"id": "None5", "metadata": {"taxonomy": '
+    '["k__Bacteria"]}},{"id": "None4", "metadata": {"taxonomy": '
+    '["Unclassified"]}},{"id": "None3", "metadata": {"taxonomy": '
+    '["k__Bacteria"]}},{"id": "None2", "metadata": {"taxonomy": '
+    '["k__Bacteria"]}},{"id": "None1", "metadata": {"taxonomy": '
+    '["Unclassified"]}},{"id": "879972", "metadata": {"taxonomy": '
+    '["k__Bacteria"]}},{"id": "None9", "metadata": {"taxonomy": '
+    '["Unclassified"]}},{"id": "None8", "metadata": {"taxonomy": '
+    '["k__Bacteria"]}}],"columns": [{"id": "f2", "metadata": null},'
+    '{"id": "f1", "metadata": null},{"id": "f3", "metadata": null},'
+    '{"id": "f4", "metadata": null},{"id": "p2", "metadata": null},{"id":'
+    ' "p1", "metadata": null},{"id": "t1", "metadata": null},{"id": '
+    '"not16S.1", "metadata": null},{"id": "t2", "metadata": null}]}')
+
+summary_default = """Num samples: 9
+Num observations: 14
+Total count: 200
+Table density (fraction of non-zero values): 0.238
+
+Counts/sample summary:
+ Min: 22.0
+ Max: 23.0
+ Median: 22.000
+ Mean: 22.222
+ Std. dev.: 0.416
+ Sample Metadata Categories: None provided
+ Observation Metadata Categories: taxonomy
+
+Counts/sample detail:
+p2: 22.0
+f1: 22.0
+f2: 22.0
+f3: 22.0
+f4: 22.0
+t2: 22.0
+not16S.1: 22.0
+t1: 23.0
+p1: 23.0"""
+
+summary_qualitative = """Num samples: 9
+Num observations: 14
+
+Observations/sample summary:
+ Min: 1
+ Max: 9
+ Median: 3.000
+ Mean: 3.333
+ Std. dev.: 2.211
+ Sample Metadata Categories: None provided
+ Observation Metadata Categories: taxonomy
+
+Observations/sample detail:
+f4: 1
+f1: 2
+f3: 2
+not16S.1: 2
+f2: 3
+t2: 3
+t1: 4
+p1: 4
+p2: 9"""
+
+if __name__ == "__main__":
+    main()
diff --git a/tests/test_commands/test_table_converter.py b/tests/test_cli/test_table_converter.py
similarity index 50%
rename from tests/test_commands/test_table_converter.py
rename to tests/test_cli/test_table_converter.py
index 9b0568c..be25e35 100644
--- a/tests/test_commands/test_table_converter.py
+++ b/tests/test_cli/test_table_converter.py
@@ -1,44 +1,45 @@
 #!/usr/bin/env python
 
 # -----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
+# Copyright (c) 2011-2015, The BIOM Format Development Team.
 #
 # Distributed under the terms of the Modified BSD License.
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # -----------------------------------------------------------------------------
 
-__author__ = "Jai Ram Rideout"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Jai Ram Rideout", "Jose Antonio Navas Molina"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Jai Ram Rideout"
-__email__ = "jai.rideout at gmail.com"
-
 from os.path import abspath, dirname, join
+import tempfile
 
 import numpy as np
 
-from pyqi.core.exception import CommandError
-from biom.commands.table_converter import TableConverter
-from biom.parse import MetadataMap, parse_biom_table
+from biom.cli.table_converter import _convert
+from biom.cli.util import write_biom_table
+from biom.parse import MetadataMap, load_table
 from biom.table import Table
-from biom.util import biom_open
+from biom import load_table
+from biom.parse import biom_open, parse_biom_table
 from unittest import TestCase, main
-from StringIO import StringIO
+from io import StringIO
 
 
 class TableConverterTests(TestCase):
 
     def setUp(self):
         """Set up data for use in unit tests."""
-        self.cmd = TableConverter()
+        self.cmd = _convert
+        self.output_filepath = tempfile.NamedTemporaryFile().name
 
-        self.biom_lines1 = biom1
-        self.biom_table1 = parse_biom_table(self.biom_lines1)
+        with tempfile.NamedTemporaryFile('w') as fh:
+            fh.write(biom1)
+            fh.flush()
+            self.biom_table1 = load_table(fh.name)
 
-        self.classic_lines1 = classic1.split('\n')
+        self.biom_lines1 = biom1.split('\n')
+        with tempfile.NamedTemporaryFile('w') as fh:
+            fh.write(classic1)
+            fh.flush()
+            self.classic_biom1 = load_table(fh.name)
 
         self.sample_md1 = MetadataMap.from_file(sample_md1.split('\n'))
 
@@ -50,11 +51,11 @@ class TableConverterTests(TestCase):
 
     def test_classic_to_biom(self):
         """Correctly converts classic to biom."""
-        obs = self.cmd(table=parse_biom_table(self.classic_lines1),
-                       to_json=True, table_type='OTU table')
-        self.assertEqual(obs.keys(), ['table'])
+        self.cmd(table=self.classic_biom1,
+                 output_filepath=self.output_filepath,
+                 to_json=True, table_type='OTU table')
 
-        obs = parse_biom_table(obs['table'][0].to_json('testing'))
+        obs = load_table(self.output_filepath)
         self.assertEqual(type(obs), Table)
         self.assertEqual(len(obs.ids()), 9)
         self.assertEqual(len(obs.ids(axis='observation')), 14)
@@ -64,18 +65,18 @@ class TableConverterTests(TestCase):
     def test_classic_to_biom_with_metadata(self):
         """Correctly converts classic to biom with metadata."""
         # No processing of metadata.
-        obs = self.cmd(table=parse_biom_table(self.classic_lines1),
+        obs = self.cmd(table=self.classic_biom1,
+                       output_filepath=self.output_filepath,
                        sample_metadata=self.sample_md1, to_json=True,
                        table_type='OTU table', process_obs_metadata='naive')
-        self.assertEqual(obs.keys(), ['table'])
 
-        obs = parse_biom_table(obs['table'][0].to_json('testing'))
+        obs = load_table(self.output_filepath)
         self.assertEqual(type(obs), Table)
         self.assertEqual(len(obs.ids()), 9)
         self.assertEqual(len(obs.ids(axis='observation')), 14)
         self.assertNotEqual(obs.metadata(), None)
         self.assertNotEqual(obs.metadata(axis='observation'), None)
-        self.assertEqual(obs.metadata()[obs.index('p2', 'sample')],
+        self.assertEqual(obs.metadata()[obs.index(u'p2', u'sample')],
                          {'foo': 'c;b;a'})
         self.assertEqual(obs.metadata()[obs.index('not16S.1', 'sample')],
                          {'foo': 'b;c;d'})
@@ -84,12 +85,12 @@ class TableConverterTests(TestCase):
             {'taxonomy': 'Unclassified'})
 
         # With processing of metadata (currently only supports observation md).
-        obs = self.cmd(table=parse_biom_table(self.classic_lines1),
+        obs = self.cmd(table=self.classic_biom1,
+                       output_filepath=self.output_filepath,
                        sample_metadata=self.sample_md1, table_type='OTU table',
                        process_obs_metadata='sc_separated', to_json=True)
-        self.assertEqual(obs.keys(), ['table'])
 
-        obs = parse_biom_table(obs['table'][0].to_json('testing'))
+        obs = load_table(self.output_filepath)
         self.assertEqual(type(obs), Table)
         self.assertEqual(len(obs.ids()), 9)
         self.assertEqual(len(obs.ids(axis='observation')), 14)
@@ -103,141 +104,118 @@ class TableConverterTests(TestCase):
             obs.index('None11', 'observation')],
             {'taxonomy': ['Unclassified']})
 
-    def test_biom_to_classic(self):
+    def test_biom_to_classic1(self):
         """Correctly converts biom to classic."""
-        obs = self.cmd(table=parse_biom_table(self.biom_lines1),
+        self.cmd(table=self.biom_table1,
+                       output_filepath=self.output_filepath,
                        to_tsv=True, header_key='taxonomy')
-        self.assertEqual(obs.keys(), ['table'])
-        self.assertEqual(obs['table'][0], classic1)
-
-        obs = self.cmd(table=parse_biom_table(self.biom_lines1), to_tsv=True,
-                       header_key='taxonomy', output_metadata_id='foo')
-        self.assertEqual(obs.keys(), ['table'])
-        obs_md_col = obs['table'][0].split('\n')[1].split('\t')[-1]
-        self.assertEqual(obs_md_col, 'foo')
-
-    def test_invalid_input(self):
-        """Correctly handles invalid input by raising a CommandError."""
-        # Too many ops.
-        with self.assertRaises(CommandError):
-            self.cmd(table_file=self.biom_lines1,
-                     sparse_biom_to_dense_biom=True,
-                     biom_to_classic_table=True)
 
-        # biom -> classic, but supply classic
-        with self.assertRaises(CommandError):
-            self.cmd(table_file=self.classic_lines1,
-                     biom_to_classic_table=True)
+        self.assertEqual(load_table(self.output_filepath), self.classic_biom1)
 
-        # sparse biom -> dense biom, but supply classic
-        with self.assertRaises(CommandError):
-            self.cmd(table_file=self.classic_lines1,
-                     sparse_biom_to_dense_biom=True)
-
-        # dense biom -> sparse biom, but supply classic
-        with self.assertRaises(CommandError):
-            self.cmd(table_file=self.classic_lines1,
-                     dense_biom_to_sparse_biom=True)
-
-        # Unknown observation processor.
-        with self.assertRaises(CommandError):
-            self.cmd(table_file=self.classic_lines1,
-                     process_obs_metadata='foo')
-
-        # classic -> biom, but supply biom
-        with self.assertRaises(CommandError):
-            self.cmd(table_file=StringIO(self.biom_lines1),
-                     process_obs_metadata='sc_separated')
+    def test_biom_to_classic2(self):
+        """Correctly converts biom to classic with metadata renaming."""
+        self.cmd(table=self.biom_table1,
+                       output_filepath=self.output_filepath, to_tsv=True,
+                       header_key='taxonomy', output_metadata_id='foo')
+        obs = load_table(self.output_filepath)
+        self.assertTrue('foo' in obs.metadata(axis='observation')[0])
 
     def test_json_to_hdf5_collapsed_samples(self):
         """Correctly converts json to HDF5 changing the sample metadata"""
         with biom_open(self.json_collapsed_samples) as f:
-            obs = self.cmd(table=parse_biom_table(f), to_hdf5=True,
+            obs = self.cmd(table=parse_biom_table(f),
+                           output_filepath=self.output_filepath, to_hdf5=True,
                            collapsed_samples=True)
-        self.assertEqual(obs.keys(), ['table'])
+        obs = load_table(self.output_filepath)
         exp = Table(np.array([[0., 1.], [6., 6.], [6., 1.],
                               [1., 4.], [0., 2.]]),
-                    observation_ids=['GG_OTU_1', 'GG_OTU_2', 'GG_OTU_3',
-                                     'GG_OTU_4', 'GG_OTU_5'],
-                    sample_ids=['skin', 'gut'],
+                    observation_ids=[u'GG_OTU_1', u'GG_OTU_2', u'GG_OTU_3',
+                                     u'GG_OTU_4', u'GG_OTU_5'],
+                    sample_ids=[u'skin', u'gut'],
                     observation_metadata=[
-                        {'taxonomy': ['k__Bacteria', 'p__Proteobacteria',
-                                      'c__Gammaproteobacteria',
-                                      'o__Enterobacteriales',
-                                      'f__Enterobacteriaceae',
-                                      'g__Escherichia', 's__']},
-                        {'taxonomy': ['k__Bacteria', 'p__Cyanobacteria',
-                                      'c__Nostocophycideae', 'o__Nostocales',
-                                      'f__Nostocaceae', 'g__Dolichospermum',
-                                      's__']},
-                        {'taxonomy': ['k__Archaea', 'p__Euryarchaeota',
-                                      'c__Methanomicrobia',
-                                      'o__Methanosarcinales',
-                                      'f__Methanosarcinaceae',
-                                      'g__Methanosarcina', 's__']},
-                        {'taxonomy': ['k__Bacteria', 'p__Firmicutes',
-                                      'c__Clostridia', 'o__Halanaerobiales',
-                                      'f__Halanaerobiaceae',
-                                      'g__Halanaerobium',
-                                      's__Halanaerobiumsaccharolyticum']},
-                        {'taxonomy': ['k__Bacteria', 'p__Proteobacteria',
-                                      'c__Gammaproteobacteria',
-                                      'o__Enterobacteriales',
-                                      'f__Enterobacteriaceae',
-                                      'g__Escherichia', 's__']}],
+                        {u'taxonomy': [u'k__Bacteria', u'p__Proteobacteria',
+                                       u'c__Gammaproteobacteria',
+                                       u'o__Enterobacteriales',
+                                       u'f__Enterobacteriaceae',
+                                       u'g__Escherichia', u's__']},
+                        {u'taxonomy': [u'k__Bacteria', u'p__Cyanobacteria',
+                                       u'c__Nostocophycideae',
+                                       u'o__Nostocales', u'f__Nostocaceae',
+                                       u'g__Dolichospermum', u's__']},
+                        {u'taxonomy': [u'k__Archaea', u'p__Euryarchaeota',
+                                       u'c__Methanomicrobia',
+                                       u'o__Methanosarcinales',
+                                       u'f__Methanosarcinaceae',
+                                       u'g__Methanosarcina', u's__']},
+                        {u'taxonomy': [u'k__Bacteria', u'p__Firmicutes',
+                                       u'c__Clostridia', u'o__Halanaerobiales',
+                                       u'f__Halanaerobiaceae',
+                                       u'g__Halanaerobium',
+                                       u's__Halanaerobiumsaccharolyticum']},
+                        {u'taxonomy': [u'k__Bacteria', u'p__Proteobacteria',
+                                       u'c__Gammaproteobacteria',
+                                       u'o__Enterobacteriales',
+                                       u'f__Enterobacteriaceae',
+                                       u'g__Escherichia', u's__']}],
                     sample_metadata=[
-                        {'collapsed_ids': ['Sample5', 'Sample4', 'Sample6']},
-                        {'collapsed_ids': ['Sample1', 'Sample3', 'Sample2']}
+                        {u'collapsed_ids': [u'Sample4', u'Sample5',
+                                            u'Sample6']},
+                        {u'collapsed_ids': [u'Sample1', u'Sample2',
+                                            u'Sample3']}
                         ],
-                    type='OTU table')
-        self.assertEqual(obs['table'][0], exp)
+                    type=u'OTU table')
+        self.assertEqual(obs, exp)
 
     def test_json_to_hdf5_collapsed_metadata(self):
         """Correctly converts json to HDF5 changing the observation metadata"""
         with biom_open(self.json_collapsed_obs) as f:
-            obs = self.cmd(table=parse_biom_table(f), to_hdf5=True,
+            t = parse_biom_table(f)
+            obs = self.cmd(table=t,
+                           output_filepath=self.output_filepath, to_hdf5=True,
                            collapsed_observations=True)
-        self.assertEqual(obs.keys(), ['table'])
+        obs = load_table(self.output_filepath)
         exp = Table(np.array([[2., 1., 1., 0., 0., 1.],
                               [0., 0., 1., 4., 0., 2.],
                               [5., 1., 0., 2., 3., 1.],
                               [0., 1., 2., 0., 0., 0.]]),
-                    observation_ids=['p__Firmicutes', 'p__Euryarchaeota',
-                                     'p__Cyanobacteria', 'p__Proteobacteria'],
-                    sample_ids=['Sample1', 'Sample2', 'Sample3',
-                                'Sample4', 'Sample5', 'Sample6'],
+                    observation_ids=[u'p__Firmicutes', u'p__Euryarchaeota',
+                                     u'p__Cyanobacteria',
+                                     u'p__Proteobacteria'],
+                    sample_ids=[u'Sample1', u'Sample2', u'Sample3',
+                                u'Sample4', u'Sample5', u'Sample6'],
                     observation_metadata=[
-                        {'collapsed_ids': ['GG_OTU_4']},
-                        {'collapsed_ids': ['GG_OTU_3']},
-                        {'collapsed_ids': ['GG_OTU_2']},
-                        {'collapsed_ids': ['GG_OTU_1', 'GG_OTU_5']}],
+                        {u'collapsed_ids': [u'GG_OTU_4']},
+                        {u'collapsed_ids': [u'GG_OTU_3']},
+                        {u'collapsed_ids': [u'GG_OTU_2']},
+                        {u'collapsed_ids': [u'GG_OTU_1', u'GG_OTU_5']}],
                     sample_metadata=[
-                        {'LinkerPrimerSequence': 'CATGCTGCCTCCCGTAGGAGT',
-                         'BarcodeSequence': 'CGCTTATCGAGA',
-                         'Description': 'human gut',
-                         'BODY_SITE': 'gut'},
-                        {'LinkerPrimerSequence': 'CATGCTGCCTCCCGTAGGAGT',
-                         'BarcodeSequence': 'CATACCAGTAGC',
-                         'Description': 'human gut',
-                         'BODY_SITE': 'gut'},
-                        {'LinkerPrimerSequence': 'CATGCTGCCTCCCGTAGGAGT',
-                         'BarcodeSequence': 'CTCTCTACCTGT',
-                         'Description': 'human gut',
-                         'BODY_SITE': 'gut'},
-                        {'LinkerPrimerSequence': 'CATGCTGCCTCCCGTAGGAGT',
-                         'BarcodeSequence': 'CTCTCGGCCTGT',
-                         'Description': 'human skin',
-                         'BODY_SITE': 'skin'},
-                        {'LinkerPrimerSequence': 'CATGCTGCCTCCCGTAGGAGT',
-                         'BarcodeSequence': 'CTCTCTACCAAT',
-                         'Description': 'human skin',
-                         'BODY_SITE': 'skin'},
-                        {'LinkerPrimerSequence': 'CATGCTGCCTCCCGTAGGAGT',
-                         'BarcodeSequence': 'CTAACTACCAAT',
-                         'Description': 'human skin',
-                         'BODY_SITE': 'skin'}],
-                    type='OTU table')
-        self.assertEqual(obs['table'][0], exp)
+                        {u'LinkerPrimerSequence': u'CATGCTGCCTCCCGTAGGAGT',
+                         u'BarcodeSequence': u'CGCTTATCGAGA',
+                         u'Description': u'human gut',
+                         u'BODY_SITE': u'gut'},
+                        {u'LinkerPrimerSequence': u'CATGCTGCCTCCCGTAGGAGT',
+                         u'BarcodeSequence': u'CATACCAGTAGC',
+                         u'Description': u'human gut',
+                         u'BODY_SITE': u'gut'},
+                        {u'LinkerPrimerSequence': u'CATGCTGCCTCCCGTAGGAGT',
+                         u'BarcodeSequence': u'CTCTCTACCTGT',
+                         u'Description': u'human gut',
+                         u'BODY_SITE': u'gut'},
+                        {u'LinkerPrimerSequence': u'CATGCTGCCTCCCGTAGGAGT',
+                         u'BarcodeSequence': u'CTCTCGGCCTGT',
+                         u'Description': u'human skin',
+                         u'BODY_SITE': u'skin'},
+                        {u'LinkerPrimerSequence': u'CATGCTGCCTCCCGTAGGAGT',
+                         u'BarcodeSequence': u'CTCTCTACCAAT',
+                         u'Description': u'human skin',
+                         u'BODY_SITE': u'skin'},
+                        {u'LinkerPrimerSequence': u'CATGCTGCCTCCCGTAGGAGT',
+                         u'BarcodeSequence': u'CTAACTACCAAT',
+                         u'Description': u'human skin',
+                         u'BODY_SITE': u'skin'}],
+                    type=u'OTU table')
+
+        self.assertEqual(obs, exp)
 
 
 biom1 = """
diff --git a/tests/test_cli/test_table_normalizer.py b/tests/test_cli/test_table_normalizer.py
new file mode 100755
index 0000000..8e4feaf
--- /dev/null
+++ b/tests/test_cli/test_table_normalizer.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+# -----------------------------------------------------------------------------
+# Copyright (c) 2011-2013, The BIOM Format Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+
+from unittest import TestCase, main
+
+import os
+
+import biom
+from biom.cli.table_normalizer import _normalize_table
+from biom.parse import parse_biom_table
+from biom.util import HAVE_H5PY
+from biom.exception import UnknownAxisError
+
+
+class TableNormalizerTests(TestCase):
+
+    def setUp(self):
+        """initialize objects for use in tests"""
+        self.cmd = _normalize_table
+
+        cwd = os.getcwd()
+        if '/' in __file__:
+            os.chdir(__file__.rsplit('/', 1)[0])
+        self.table = biom.load_table('test_data/test.json')
+        os.chdir(cwd)
+
+    def test_bad_inputs(self):
+        # relative_abund and pa
+        with self.assertRaises(ValueError):
+            self.cmd(self.table, relative_abund=True,
+                     presence_absence=True, axis="sample")
+        # no normalization type
+        with self.assertRaises(ValueError):
+            self.cmd(self.table, relative_abund=False,
+                     presence_absence=False, axis="sample")
+        # bad axis
+        with self.assertRaises(UnknownAxisError):
+            self.cmd(self.table, relative_abund=True,
+                     axis="nonsense")
+
+
+if __name__ == "__main__":
+    main()
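
For reference, a minimal sketch of how the new _normalize_table helper is invoked, based only on the keyword arguments exercised in the test above (the return value is assumed here to be the normalized biom.Table; the test does not assert it directly):

    import biom
    from biom.cli.table_normalizer import _normalize_table

    # Convert counts to relative abundances per sample. Exactly one of
    # relative_abund / presence_absence may be True, and axis must be a
    # valid Table axis; otherwise the test above expects an error.
    table = biom.load_table('test_data/test.json')
    normalized = _normalize_table(table, relative_abund=True, axis='sample')
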
diff --git a/tests/test_cli/test_uc_processor.py b/tests/test_cli/test_uc_processor.py
new file mode 100644
index 0000000..6c4493d
--- /dev/null
+++ b/tests/test_cli/test_uc_processor.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+
+# -----------------------------------------------------------------------------
+# Copyright (c) 2011-2015, The BIOM Format Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+
+import tempfile
+from unittest import TestCase, main
+
+import numpy as np
+
+import biom
+from biom.cli.uc_processor import _from_uc
+
+class TestUcProcessor(TestCase):
+
+    def setUp(self):
+        """Set up data for use in unit tests."""
+        self.cmd = _from_uc
+        self.uc_minimal = uc_minimal.split('\n')
+        self.uc = uc.split('\n')
+        self.rep_set = rep_set.split('\n')
+        self.rep_set_no_mapping = rep_set_no_mapping.split('\n')
+        self.rep_set_missing_id = rep_set_missing_id.split('\n')
+
+    def test_basic(self):
+        obs = self.cmd(self.uc_minimal)
+        expected = biom.Table(np.array([[1.0]]),
+                              observation_ids=['f2_1539'],
+                              sample_ids=['f2'])
+        self.assertEqual(obs, expected)
+
+    def test_basic_w_mapping(self):
+        obs = self.cmd(self.uc_minimal, self.rep_set)
+        expected = biom.Table(np.array([[1.0]]),
+                              observation_ids=['otu1'],
+                              sample_ids=['f2'])
+        self.assertEqual(obs, expected)
+
+    def test_rep_set_no_mapping(self):
+        self.assertRaises(ValueError, self.cmd, self.uc_minimal,
+                          self.rep_set_no_mapping)
+
+    def test_rep_set_missing_id(self):
+        self.assertRaises(ValueError, self.cmd, self.uc_minimal,
+                          self.rep_set_missing_id)
+
+    def test_uc(self):
+        obs = self.cmd(self.uc)
+        expected = biom.Table(np.array([[1.0, 1.0], [0.0, 1.0]]),
+                              observation_ids=['f2_1539', 'f3_1540'],
+                              sample_ids=['f2', 'f3'])
+        self.assertEqual(obs, expected)
+
+    def test_uc_w_mapping(self):
+        obs = self.cmd(self.uc, self.rep_set)
+        expected = biom.Table(np.array([[1.0, 1.0], [0.0, 1.0]]),
+                              observation_ids=['otu1', 'otu2'],
+                              sample_ids=['f2', 'f3'])
+        self.assertEqual(obs, expected)
+
+uc_minimal = """# uclust --input /var/folders/xq/0kh93ng53bs6zzk091w_bbsr0000gn/T/UclustExactMatchFilterrW47Ju.fasta --id 0.97 --tmpdir /var/folders/xq/0kh93ng53bs6zzk091w_bbsr0000gn/T --w 8 --stepwords 8 --usersort --maxaccepts 1 --stable_sort --maxrejects 8 --uc dn-otus/uclust_picked_otus/seqs_clusters.uc
+# version=1.2.22
+# Tab-separated fields:
+# 1=Type, 2=ClusterNr, 3=SeqLength or ClusterSize, 4=PctId, 5=Strand, 6=QueryStart, 7=SeedStart, 8=Alignment, 9=QueryLabel, 10=TargetLabel
+# Record types (field 1): L=LibSeed, S=NewSeed, H=Hit, R=Reject, D=LibCluster, C=NewCluster, N=NoHit
+# For C and D types, PctId is average id with seed.
+# QueryStart and SeedStart are zero-based relative to start of sequence.
+# If minus strand, SeedStart is relative to reverse-complemented seed.
+S	0	133	*	*	*	*	*	f2_1539	*
+"""
+
+uc = """# uclust --input /var/folders/xq/0kh93ng53bs6zzk091w_bbsr0000gn/T/UclustExactMatchFilterrW47Ju.fasta --id 0.97 --tmpdir /var/folders/xq/0kh93ng53bs6zzk091w_bbsr0000gn/T --w 8 --stepwords 8 --usersort --maxaccepts 1 --stable_sort --maxrejects 8 --uc dn-otus/uclust_picked_otus/seqs_clusters.uc
+# version=1.2.22
+# Tab-separated fields:
+# 1=Type, 2=ClusterNr, 3=SeqLength or ClusterSize, 4=PctId, 5=Strand, 6=QueryStart, 7=SeedStart, 8=Alignment, 9=QueryLabel, 10=TargetLabel
+# Record types (field 1): L=LibSeed, S=NewSeed, H=Hit, R=Reject, D=LibCluster, C=NewCluster, N=NoHit
+# For C and D types, PctId is average id with seed.
+# QueryStart and SeedStart are zero-based relative to start of sequence.
+# If minus strand, SeedStart is relative to reverse-complemented seed.
+S	0	133	*	*	*	*	*	f2_1539	*
+S	0	133	*	*	*	*	*	f3_1540	*
+H	0	141	100.0	+	0	0	133M8D	f3_42	f2_1539
+"""
+
+rep_set = """>otu1 f2_1539
+ACGT
+>otu2 f3_1540
+ACCT
+"""
+
+rep_set_no_mapping = """>otu1
+ACGT
+>otu2
+ACCT
+"""
+
+rep_set_missing_id = """>otu1 f99_1539
+ACGT
+>otu2 f99_1539
+ACCT
+"""
+
+if __name__ == '__main__':
+    main()
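
Similarly, a sketch of the call pattern the uc-processor tests above rely on; the file paths are hypothetical, and _from_uc is assumed to accept any sequence of uc-format lines (the tests pass lists produced by str.split('\n')):

    from biom.cli.uc_processor import _from_uc

    # Hypothetical inputs: a uclust/usearch .uc file and an optional
    # representative-set FASTA whose headers map seed labels (e.g.
    # 'f2_1539') to OTU identifiers (e.g. 'otu1').
    with open('seqs_clusters.uc') as f:
        uc_lines = f.read().split('\n')
    with open('rep_set.fna') as f:
        rep_set_lines = f.read().split('\n')

    table = _from_uc(uc_lines, rep_set_lines)
    print(table.ids(axis='observation'))
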
diff --git a/tests/test_commands/test_table_validator.py b/tests/test_cli/test_validate_table.py
similarity index 99%
rename from tests/test_commands/test_table_validator.py
rename to tests/test_cli/test_validate_table.py
index 59e4d15..ab3df4f 100644
--- a/tests/test_commands/test_table_validator.py
+++ b/tests/test_cli/test_validate_table.py
@@ -24,7 +24,7 @@ from shutil import copy
 
 import numpy.testing as npt
 
-from biom.commands.table_validator import TableValidator
+from biom.cli.table_validator import TableValidator
 from biom.util import HAVE_H5PY
 
 
diff --git a/tests/test_commands/__init__.py b/tests/test_commands/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/tests/test_commands/test_installation_informer.py b/tests/test_commands/test_installation_informer.py
deleted file mode 100644
index a8c4728..0000000
--- a/tests/test_commands/test_installation_informer.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-
-# -----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-
-__author__ = "Jai Ram Rideout"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Jai Ram Rideout"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Jai Ram Rideout"
-__email__ = "jai.rideout at gmail.com"
-
-from biom.commands.installation_informer import InstallationInformer
-from unittest import TestCase, main
-
-
-class InstallationInformerTests(TestCase):
-
-    def setUp(self):
-        """Set up data for use in unit tests."""
-        self.cmd = InstallationInformer()
-
-    def test_default(self):
-        """Correctly returns info about the biom-format installation."""
-        # Not really sure what to specifically test here, as this information
-        # will change on a per-install basis. Just make sure the code is being
-        # exercised and we have some output.
-        obs = self.cmd()
-        self.assertEqual(obs.keys(), ['install_info_lines'])
-        self.assertTrue(len(obs['install_info_lines']) > 0)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/tests/test_commands/test_table_normalizer.py b/tests/test_commands/test_table_normalizer.py
deleted file mode 100755
index d994366..0000000
--- a/tests/test_commands/test_table_normalizer.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-
-# -----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-
-__author__ = "Michael Shaffer"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Michael Shaffer"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Greg Caporaso"
-__email__ = "gregcaporaso at gmail.com"
-
-from biom.commands.table_normalizer import TableNormalizer
-from biom.parse import parse_biom_table
-from unittest import TestCase, main
-from biom.util import HAVE_H5PY
-from pyqi.core.exception import CommandError
-
-
-class TableNormalizerTests(TestCase):
-
-    def setUp(self):
-        """initialize objects for use in tests"""
-        self.cmd = TableNormalizer()
-        self.biom_path = 'test_data/test.biom'
-
-    def test_correct_table_type(self):
-        table = self.cmd(biom_table=self.biom_path, relative_abund=True,
-                         axis="sample")['table']
-        if HAVE_H5PY:
-            self.assertEqual(table[1], "hdf5")
-        else:
-            self.assertEqual(table[1], "json")
-
-    def test_bad_inputs(self):
-        # relative_abund and pa
-        with self.assertRaises(CommandError):
-            self.cmd(biom_table=self.biom_path, relative_abund=True,
-                     presence_absence=True, axis="sample")
-        # no normalization type
-        with self.assertRaises(CommandError):
-            self.cmd(biom_table=self.biom_path, relative_abund=False,
-                     presence_absence=False, axis="sample")
-        # bad axis
-        with self.assertRaises(CommandError):
-            self.cmd(biom_table=self.biom_path, relative_abund=True,
-                     axis="nonsense")
-
-if __name__ == "__main__":
-    main()
diff --git a/tests/test_commands/test_table_summarizer.py b/tests/test_commands/test_table_summarizer.py
deleted file mode 100644
index ddd91ce..0000000
--- a/tests/test_commands/test_table_summarizer.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python
-
-# -----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-
-__author__ = "Greg Caporaso"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Greg Caporaso", "Jai Ram Rideout", "Jose Antonio Navas Molina"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Greg Caporaso"
-__email__ = "gregcaporaso at gmail.com"
-
-from biom.commands.table_summarizer import TableSummarizer
-from biom.parse import parse_biom_table
-from unittest import TestCase, main
-
-
-class TableSummarizerTests(TestCase):
-
-    def setUp(self):
-        """ initialize objects for use in tests """
-        self.biom1_lines = biom1
-        self.summary_default_lines = summary_default.split('\n')
-        self.summary_qualitative_lines = summary_qualitative.split('\n')
-
-    def test_default(self):
-        """ TableSummarizer functions as expected
-
-        """
-        t = TableSummarizer()
-        actual = t(table=(parse_biom_table(self.biom1_lines),
-                          self.biom1_lines.split('\n')),
-                   qualitative=False)
-        self.assertEqual(actual['biom_summary'], self.summary_default_lines)
-
-    def test_qualitative(self):
-        """ TableSummarizer functions as expected with qualitative=True
-
-        """
-        t = TableSummarizer()
-        actual = t(table=(parse_biom_table(self.biom1_lines),
-                          self.biom1_lines.split('\n')),
-                   qualitative=True)
-        self.assertEqual(
-            actual['biom_summary'],
-            self.summary_qualitative_lines)
-
-biom1 = ('{"id": "None","format": "Biological Observation Matrix 1.0.0",'
-         '"format_url": "http://biom-format.org","type": "OTU table",'
-         '"generated_by": "QIIME 1.6.0-dev","date": '
-         '"2013-02-09T09:30:11.550590","matrix_type": "sparse",'
-         '"matrix_element_type": "int","shape": [14, 9],"data": [[0,0,20],'
-         '[0,1,18],[0,2,18],[0,3,22],[0,4,4],[1,4,1],[2,0,1],[2,4,1],[2,5,1],'
-         '[3,6,1],[4,4,1],[5,7,20],[6,4,1],[7,4,1],[7,5,1],[8,4,1],[8,6,2],'
-         '[8,8,3],[9,7,2],[10,5,1],[11,4,9],[11,5,20],[11,6,1],[11,8,4],'
-         '[12,4,3],[12,6,19],[12,8,15],[13,0,1],[13,1,4],[13,2,4]],"rows": '
-         '[{"id": "295053", "metadata": {"taxonomy": ["k__Bacteria"]}},{"id": '
-         '"42684", "metadata": {"taxonomy": ["k__Bacteria", '
-         '"p__Proteobacteria"]}},{"id": "None11", "metadata": {"taxonomy": '
-         '["Unclassified"]}},{"id": "None10", "metadata": {"taxonomy": '
-         '["Unclassified"]}},{"id": "None7", "metadata": {"taxonomy": '
-         '["Unclassified"]}},{"id": "None6", "metadata": {"taxonomy": '
-         '["Unclassified"]}},{"id": "None5", "metadata": {"taxonomy": '
-         '["k__Bacteria"]}},{"id": "None4", "metadata": {"taxonomy": '
-         '["Unclassified"]}},{"id": "None3", "metadata": {"taxonomy": '
-         '["k__Bacteria"]}},{"id": "None2", "metadata": {"taxonomy": '
-         '["k__Bacteria"]}},{"id": "None1", "metadata": {"taxonomy": '
-         '["Unclassified"]}},{"id": "879972", "metadata": {"taxonomy": '
-         '["k__Bacteria"]}},{"id": "None9", "metadata": {"taxonomy": '
-         '["Unclassified"]}},{"id": "None8", "metadata": {"taxonomy": '
-         '["k__Bacteria"]}}],"columns": [{"id": "f2", "metadata": null},'
-         '{"id": "f1", "metadata": null},{"id": "f3", "metadata": null},'
-         '{"id": "f4", "metadata": null},{"id": "p2", "metadata": null},{"id":'
-         ' "p1", "metadata": null},{"id": "t1", "metadata": null},{"id": '
-         '"not16S.1", "metadata": null},{"id": "t2", "metadata": null}]}')
-
-summary_default = """Num samples: 9
-Num observations: 14
-Total count: 200
-Table density (fraction of non-zero values): 0.238
-
-Counts/sample summary:
- Min: 22.0
- Max: 23.0
- Median: 22.000
- Mean: 22.222
- Std. dev.: 0.416
- Sample Metadata Categories: None provided
- Observation Metadata Categories: taxonomy
-
-Counts/sample detail:
- p2: 22.0
- f1: 22.0
- f2: 22.0
- f3: 22.0
- f4: 22.0
- t2: 22.0
- not16S.1: 22.0
- t1: 23.0
- p1: 23.0"""
-
-summary_qualitative = """Num samples: 9
-Num observations: 14
-
-Observations/sample summary:
- Min: 1
- Max: 9
- Median: 3.000
- Mean: 3.333
- Std. dev.: 2.211
- Sample Metadata Categories: None provided
- Observation Metadata Categories: taxonomy
-
-Observations/sample detail:
- f4: 1
- f1: 2
- f3: 2
- not16S.1: 2
- f2: 3
- t2: 3
- t1: 4
- p1: 4
- p2: 9"""
-
-if __name__ == "__main__":
-    main()
diff --git a/tests/test_data/test.json b/tests/test_data/test.json
index 91c698a..74cd4d3 100644
--- a/tests/test_data/test.json
+++ b/tests/test_data/test.json
@@ -1 +1 @@
-{"id": "No Table ID","format": "Biological Observation Matrix 1.0.0","format_url": "http://biom-format.org","generated_by": "BIOM-Format 2.0.0-dev","date": "2014-06-02T10:08:43.174137","matrix_element_type": "float","shape": [5, 6],"data": [[0,2,1.0],[1,0,5.0],[1,1,1.0],[1,3,2.0],[1,4,3.0],[1,5,1.0],[2,2,1.0],[2,3,4.0],[2,5,2.0],[3,0,2.0],[3,1,1.0],[3,2,1.0],[3,5,1.0],[4,1,1.0],[4,2,1.0]],"rows": [{"id": "GG_OTU_1", "metadata": {"taxonomy": ["k__Bacteria", "p__Proteobacteria", "c__Gammap [...]
\ No newline at end of file
+{"id": "No Table ID","format": "Biological Observation Matrix 1.0.0","format_url": "http://biom-format.org","generated_by": "BIOM-Format 2.0.0-dev","date": "2014-06-02T10:08:43.174137", "type": "OTU table", "matrix_element_type": "float","shape": [5, 6],"data": [[0,2,1.0],[1,0,5.0],[1,1,1.0],[1,3,2.0],[1,4,3.0],[1,5,1.0],[2,2,1.0],[2,3,4.0],[2,5,2.0],[3,0,2.0],[3,1,1.0],[3,2,1.0],[3,5,1.0],[4,1,1.0],[4,2,1.0]],"rows": [{"id": "GG_OTU_1", "metadata": {"taxonomy": ["k__Bacteria", "p__Prote [...]
diff --git a/tests/test_interfaces/__init__.py b/tests/test_interfaces/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/tests/test_interfaces/test_optparse/__init__.py b/tests/test_interfaces/test_optparse/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/tests/test_interfaces/test_optparse/test_input_handler.py b/tests/test_interfaces/test_optparse/test_input_handler.py
deleted file mode 100644
index 9e2317c..0000000
--- a/tests/test_interfaces/test_optparse/test_input_handler.py
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/usr/bin/env python
-
-# -----------------------------------------------------------------------------
-# Copyright (c) 2011-2013, The BIOM Format Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-
-__author__ = "Jai Ram Rideout"
-__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
-__credits__ = ["Jai Ram Rideout"]
-__license__ = "BSD"
-__url__ = "http://biom-format.org"
-__maintainer__ = "Jai Ram Rideout"
-__email__ = "jai.rideout at gmail.com"
-
-import os
-from shutil import rmtree
-from tempfile import mkdtemp
-from unittest import TestCase, main
-from biom.interfaces.optparse.input_handler import (
-    load_biom_table, load_biom_table_with_file_contents, load_json_document,
-    load_metadata)
-from biom.parse import MetadataMap
-from biom.table import Table
-
-
-class InputHandlerTests(TestCase):
-
-    def setUp(self):
-        self.output_dir = mkdtemp()
-
-        self.biom_fp = os.path.join(self.output_dir, 'test.biom')
-        with open(self.biom_fp, 'w') as f:
-            f.write(biom1)
-
-        self.md_fp = os.path.join(self.output_dir, 'md.txt')
-        with open(self.md_fp, 'w') as f:
-            f.write(sample_md1)
-
-    def tearDown(self):
-        rmtree(self.output_dir)
-
-    def test_load_biom_table(self):
-        """Correctly parses and loads a BIOM table."""
-        obs = load_biom_table(self.biom_fp)
-        self.assertEqual(type(obs), Table)
-
-    def test_load_biom_table_with_file_contents(self):
-        """Correctly parses and loads a BIOM table, also returning the file."""
-        obs = load_biom_table_with_file_contents(self.biom_fp)
-        self.assertEqual(len(obs), 2)
-        self.assertEqual(type(obs[0]), Table)
-        self.assertEqual(type(obs[1]), file)
-        obs[1].close()
-
-    def test_load_json_document(self):
-        """Correctly parses and loads a JSON document."""
-        obs = load_json_document(self.biom_fp)
-        self.assertEqual(type(obs), dict)
-        self.assertEqual(obs['type'], 'OTU table')
-
-    def test_load_metadata(self):
-        """Correctly parses and loads a metadata map."""
-        obs = load_metadata(self.md_fp)
-        self.assertEqual(type(obs), MetadataMap)
-        self.assertEqual(obs['t1'], {'foo': 't;b;c'})
-
-        obs = load_metadata(None)
-        self.assertTrue(obs is None)
-
-
-biom1 = ('{"id": "None","format": "Biological Observation Matrix 1.0.0",'
-         '"format_url": "http://biom-format.org","type": "OTU table",'
-         '"generated_by": "QIIME 1.6.0-dev","date": '
-         '"2013-02-09T09:30:11.550590","matrix_type": "sparse",'
-         '"matrix_element_type": "int","shape": [14, 9],"data": [[0,0,20],'
-         '[0,1,18],[0,2,18],[0,3,22],[0,4,4],[1,4,1],[2,0,1],[2,4,1],[2,5,1],'
-         '[3,6,1],[4,4,1],[5,7,20],[6,4,1],[7,4,1],[7,5,1],[8,4,1],[8,6,2],'
-         '[8,8,3],[9,7,2],[10,5,1],[11,4,9],[11,5,20],[11,6,1],[11,8,4],'
-         '[12,4,3],[12,6,19],[12,8,15],[13,0,1],[13,1,4],[13,2,4]],"rows": '
-         '[{"id": "295053", "metadata": {"taxonomy": ["k__Bacteria"]}},{"id": '
-         '"42684", "metadata": {"taxonomy": ["k__Bacteria", '
-         '"p__Proteobacteria"]}},{"id": "None11", "metadata": {"taxonomy": '
-         '["Unclassified"]}},{"id": "None10", "metadata": {"taxonomy": '
-         '["Unclassified"]}},{"id": "None7", "metadata": {"taxonomy": '
-         '["Unclassified"]}},{"id": "None6", "metadata": {"taxonomy": '
-         '["Unclassified"]}},{"id": "None5", "metadata": {"taxonomy": '
-         '["k__Bacteria"]}},{"id": "None4", "metadata": {"taxonomy": '
-         ' ["Unclassified"]}},{"id": "None3", "metadata": {"taxonomy": '
-         '["k__Bacteria"]}},{"id": "None2", "metadata": {"taxonomy": '
-         '["k__Bacteria"]}},{"id": "None1", "metadata": {"taxonomy": '
-         '["Unclassified"]}},{"id": "879972", "metadata": {"taxonomy": '
-         '["k__Bacteria"]}},{"id": "None9", "metadata": {"taxonomy": '
-         '["Unclassified"]}},{"id": "None8", "metadata": {"taxonomy": '
-         '["k__Bacteria"]}}],"columns": [{"id": "f2", "metadata": null},{"id":'
-         ' "f1", "metadata": null},{"id": "f3", "metadata": null},{"id": "f4",'
-         ' "metadata": null},{"id": "p2", "metadata": null},{"id": "p1",'
-         ' "metadata": null},{"id": "t1", "metadata": null},{"id": "not16S.1",'
-         ' "metadata": null},{"id": "t2", "metadata": null}]}')
-
-sample_md1 = """#SampleID\tfoo
-f4\ta;b;c
-not16S.1\tb;c;d
-f2\ta;c;d
-f1\ta;b;c
-p2\tc;b;a
-f3\ta;b;c
-t1\tt;b;c
-p1\tp;b;c
-t2\tt;2;z
-"""
-
-
-if __name__ == '__main__':
-    main()
diff --git a/tests/test_parse.py b/tests/test_parse.py
index 02813b4..5be8d62 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -9,13 +9,14 @@
 # -----------------------------------------------------------------------------
 
 import os
-from StringIO import StringIO
+from io import StringIO
 import json
 from unittest import TestCase, main
 
+import numpy as np
 import numpy.testing as npt
 
-from biom.parse import generatedby, MetadataMap, parse_biom_table
+from biom.parse import generatedby, MetadataMap, parse_biom_table, parse_uc
 from biom.table import Table
 from biom.util import HAVE_H5PY, __version__
 if HAVE_H5PY:
@@ -162,7 +163,7 @@ class ParseTests(TestCase):
         self.assertEqual(tab.metadata(), None)
         self.assertEqual(tab.metadata(axis='observation'), None)
 
-        tablestring = '''{
+        tablestring = u'''{
             "id":null,
             "format": "Biological Observation Matrix 0.9.1-dev",
             "format_url": "http://biom-format.org",
@@ -355,7 +356,7 @@ K00507	0.0	0.0	Metabolism; Lipid Metabolism; Biosynthesis of unsaturated fatt\
 y acids|Organismal Systems; Endocrine System; PPAR signaling pathway
 """
 
-biom_minimal_sparse = """
+biom_minimal_sparse = u"""
     {
         "id":null,
         "format": "Biological Observation Matrix v0.9",
@@ -1553,5 +1554,139 @@ classic_otu_table1_no_tax = """#Full OTU Counts
 415	0	0	0	0	0	7	0	2	2
 416	0	1	0	0	1	0	0	0	0"""
 
+
+class ParseUcTests(TestCase):
+
+    def test_empty(self):
+        """ empty uc file returns empty Table
+        """
+        actual = parse_uc(uc_empty.split('\n'))
+        expected = Table(np.array([[]]),
+                         observation_ids=[],
+                         sample_ids=[])
+        self.assertEqual(actual, expected)
+
+    def test_minimal(self):
+        """ single new seed observed
+        """
+        actual = parse_uc(uc_minimal.split('\n'))
+        expected = Table(np.array([[1.0]]),
+                         observation_ids=['f2_1539'],
+                         sample_ids=['f2'])
+        self.assertEqual(actual, expected)
+
+    def test_lib_minimal(self):
+        """ single library seed observed
+        """
+        actual = parse_uc(uc_lib_minimal.split('\n'))
+        expected = Table(np.array([[1.0]]),
+                         observation_ids=['295053'],
+                         sample_ids=['f2'])
+        self.assertEqual(actual, expected)
+
+    def test_invalid(self):
+        """ invalid query sequence identifier detected
+        """
+        self.assertRaises(ValueError, parse_uc, uc_invalid_id.split('\n'))
+
+    def test_seed_hits(self):
+        """ multiple new seeds observed
+        """
+        actual = parse_uc(uc_seed_hits.split('\n'))
+        expected = Table(np.array([[2.0, 1.0], [0.0, 1.0]]),
+                         observation_ids=['f2_1539', 'f3_44'],
+                         sample_ids=['f2', 'f3'])
+        self.assertEqual(actual, expected)
+
+    def test_mixed_hits(self):
+        """ new and library seeds observed
+        """
+        actual = parse_uc(uc_mixed_hits.split('\n'))
+        expected = Table(np.array([[2.0, 1.0], [0.0, 1.0], [1.0, 0.0]]),
+                         observation_ids=['f2_1539', 'f3_44', '295053'],
+                         sample_ids=['f2', 'f3'])
+        self.assertEqual(actual, expected)
+
+
+# no hits or library seeds
+uc_empty = """# uclust --input /var/folders/xq/0kh93ng53bs6zzk091w_bbsr0000gn/T/UclustExactMatchFilterrW47Ju.fasta --id 0.97 --tmpdir /var/folders/xq/0kh93ng53bs6zzk091w_bbsr0000gn/T --w 8 --stepwords 8 --usersort --maxaccepts 1 --stable_sort --maxrejects 8 --uc dn-otus/uclust_picked_otus/seqs_clusters.uc
+# version=1.2.22
+# Tab-separated fields:
+# 1=Type, 2=ClusterNr, 3=SeqLength or ClusterSize, 4=PctId, 5=Strand, 6=QueryStart, 7=SeedStart, 8=Alignment, 9=QueryLabel, 10=TargetLabel
+# Record types (field 1): L=LibSeed, S=NewSeed, H=Hit, R=Reject, D=LibCluster, C=NewCluster, N=NoHit
+# For C and D types, PctId is average id with seed.
+# QueryStart and SeedStart are zero-based relative to start of sequence.
+# If minus strand, SeedStart is relative to reverse-complemented seed.
+"""
+
+# label not in qiime post-split-libraries format
+uc_invalid_id = """# uclust --input /var/folders/xq/0kh93ng53bs6zzk091w_bbsr0000gn/T/UclustExactMatchFilterrW47Ju.fasta --id 0.97 --tmpdir /var/folders/xq/0kh93ng53bs6zzk091w_bbsr0000gn/T --w 8 --stepwords 8 --usersort --maxaccepts 1 --stable_sort --maxrejects 8 --uc dn-otus/uclust_picked_otus/seqs_clusters.uc
+# version=1.2.22
+# Tab-separated fields:
+# 1=Type, 2=ClusterNr, 3=SeqLength or ClusterSize, 4=PctId, 5=Strand, 6=QueryStart, 7=SeedStart, 8=Alignment, 9=QueryLabel, 10=TargetLabel
+# Record types (field 1): L=LibSeed, S=NewSeed, H=Hit, R=Reject, D=LibCluster, C=NewCluster, N=NoHit
+# For C and D types, PctId is average id with seed.
+# QueryStart and SeedStart are zero-based relative to start of sequence.
+# If minus strand, SeedStart is relative to reverse-complemented seed.
+S	0	133	*	*	*	*	*	1539	*
+"""
+
+# contains single new (de novo) seed hit
+uc_minimal = """# uclust --input /var/folders/xq/0kh93ng53bs6zzk091w_bbsr0000gn/T/UclustExactMatchFilterrW47Ju.fasta --id 0.97 --tmpdir /var/folders/xq/0kh93ng53bs6zzk091w_bbsr0000gn/T --w 8 --stepwords 8 --usersort --maxaccepts 1 --stable_sort --maxrejects 8 --uc dn-otus/uclust_picked_otus/seqs_clusters.uc
+# version=1.2.22
+# Tab-separated fields:
+# 1=Type, 2=ClusterNr, 3=SeqLength or ClusterSize, 4=PctId, 5=Strand, 6=QueryStart, 7=SeedStart, 8=Alignment, 9=QueryLabel, 10=TargetLabel
+# Record types (field 1): L=LibSeed, S=NewSeed, H=Hit, R=Reject, D=LibCluster, C=NewCluster, N=NoHit
+# For C and D types, PctId is average id with seed.
+# QueryStart and SeedStart are zero-based relative to start of sequence.
+# If minus strand, SeedStart is relative to reverse-complemented seed.
+S	0	133	*	*	*	*	*	f2_1539	*
+"""
+
+# contains single library (reference) seed hit
+uc_lib_minimal = """# uclust --input /var/folders/xq/0kh93ng53bs6zzk091w_bbsr0000gn/T/UclustExactMatchFilterrW47Ju.fasta --id 0.97 --tmpdir /var/folders/xq/0kh93ng53bs6zzk091w_bbsr0000gn/T --w 8 --stepwords 8 --usersort --maxaccepts 1 --stable_sort --maxrejects 8 --uc dn-otus/uclust_picked_otus/seqs_clusters.uc
+# version=1.2.22
+# Tab-separated fields:
+# 1=Type, 2=ClusterNr, 3=SeqLength or ClusterSize, 4=PctId, 5=Strand, 6=QueryStart, 7=SeedStart, 8=Alignment, 9=QueryLabel, 10=TargetLabel
+# Record types (field 1): L=LibSeed, S=NewSeed, H=Hit, R=Reject, D=LibCluster, C=NewCluster, N=NoHit
+# For C and D types, PctId is average id with seed.
+# QueryStart and SeedStart are zero-based relative to start of sequence.
+# If minus strand, SeedStart is relative to reverse-complemented seed.
+L	3	1389	*	*	*	*	*	295053	*
+H	3	133	100.0	+	0	0	519I133M737I	f2_1539	295053
+"""
+
+# contains new seed (de novo) hits only
+uc_seed_hits = """# uclust --input /var/folders/xq/0kh93ng53bs6zzk091w_bbsr0000gn/T/UclustExactMatchFilterrW47Ju.fasta --id 0.97 --tmpdir /var/folders/xq/0kh93ng53bs6zzk091w_bbsr0000gn/T --w 8 --stepwords 8 --usersort --maxaccepts 1 --stable_sort --maxrejects 8 --uc dn-otus/uclust_picked_otus/seqs_clusters.uc
+# version=1.2.22
+# Tab-separated fields:
+# 1=Type, 2=ClusterNr, 3=SeqLength or ClusterSize, 4=PctId, 5=Strand, 6=QueryStart, 7=SeedStart, 8=Alignment, 9=QueryLabel, 10=TargetLabel
+# Record types (field 1): L=LibSeed, S=NewSeed, H=Hit, R=Reject, D=LibCluster, C=NewCluster, N=NoHit
+# For C and D types, PctId is average id with seed.
+# QueryStart and SeedStart are zero-based relative to start of sequence.
+# If minus strand, SeedStart is relative to reverse-complemented seed.
+S	0	133	*	*	*	*	*	f2_1539	*
+H	0	141	100.0	+	0	0	133M8D	f3_42	f2_1539
+H	0	141	100.0	+	0	0	133M8D	f2_43	f2_1539
+S	0	133	*	*	*	*	*	f3_44	*
+"""
+
+# contains library (reference) and new seed (de novo) hits
+uc_mixed_hits = """# uclust --input /var/folders/xq/0kh93ng53bs6zzk091w_bbsr0000gn/T/UclustExactMatchFilterrW47Ju.fasta --id 0.97 --tmpdir /var/folders/xq/0kh93ng53bs6zzk091w_bbsr0000gn/T --w 8 --stepwords 8 --usersort --maxaccepts 1 --stable_sort --maxrejects 8 --uc dn-otus/uclust_picked_otus/seqs_clusters.uc
+# version=1.2.22
+# Tab-separated fields:
+# 1=Type, 2=ClusterNr, 3=SeqLength or ClusterSize, 4=PctId, 5=Strand, 6=QueryStart, 7=SeedStart, 8=Alignment, 9=QueryLabel, 10=TargetLabel
+# Record types (field 1): L=LibSeed, S=NewSeed, H=Hit, R=Reject, D=LibCluster, C=NewCluster, N=NoHit
+# For C and D types, PctId is average id with seed.
+# QueryStart and SeedStart are zero-based relative to start of sequence.
+# If minus strand, SeedStart is relative to reverse-complemented seed.
+S	0	133	*	*	*	*	*	f2_1539	*
+H	0	141	100.0	+	0	0	133M8D	f3_42	f2_1539
+H	0	141	100.0	+	0	0	133M8D	f2_43	f2_1539
+S	0	133	*	*	*	*	*	f3_44	*
+L	3	1389	*	*	*	*	*	295053	*
+H	3	133	100.0	+	0	0	519I133M737I	f2_1539	295053
+"""
+
 if __name__ == '__main__':
     main()
diff --git a/tests/test_table.py b/tests/test_table.py
index adfe0ba..6e6f21e 100644
--- a/tests/test_table.py
+++ b/tests/test_table.py
@@ -13,12 +13,14 @@ import os
 from json import loads
 from tempfile import NamedTemporaryFile
 from unittest import TestCase, main
-from StringIO import StringIO
+from io import StringIO
 
+from future.utils import viewkeys
 import numpy.testing as npt
 import numpy as np
 from scipy.sparse import lil_matrix, csr_matrix, csc_matrix
 
+from biom import example_table
 from biom.exception import UnknownAxisError, UnknownIDError, TableException
 from biom.util import unzip, HAVE_H5PY, H5PY_VLEN_STR
 from biom.table import (Table, prefer_self, index_list, list_nparray_to_sparse,
@@ -44,6 +46,57 @@ __email__ = "daniel.mcdonald at colorado.edu"
 
 
 class SupportTests(TestCase):
+
+    def test_head(self):
+        # example table is 2 x 3, so no change in contained data
+        exp = example_table
+        obs = example_table.head()
+        self.assertIsNot(obs, exp)
+        self.assertEqual(obs, exp)
+
+    def test_head_bounded(self):
+        obs = example_table.head(1)
+        from collections import defaultdict
+        exp = Table(np.array([[0., 1., 2.]]), ['O1'], ['S1', 'S2', 'S3'],
+                             [{'taxonomy': ['Bacteria', 'Firmicutes']}],
+                             [{'environment': 'A'}, {'environment': 'B'},
+                              {'environment': 'A'}])
+
+        self.assertEqual(obs, exp)
+
+        obs = example_table.head(m=2)
+        exp = Table(np.array([[0., 1.], [3., 4.]]), ['O1', 'O2'], ['S1', 'S2'],
+                             [{'taxonomy': ['Bacteria', 'Firmicutes']},
+                              {'taxonomy': ['Bacteria', 'Bacteroidetes']}],
+                             [{'environment': 'A'}, {'environment': 'B'}])
+        self.assertEqual(obs, exp)
+
+    def test_head_overstep(self):
+        # silently works
+        exp = example_table
+        obs = example_table.head(10000)
+        self.assertIsNot(obs, exp)
+        self.assertEqual(obs, exp)
+
+    def test_head_zero_or_neg(self):
+        with self.assertRaises(IndexError):
+            example_table.head(0)
+
+        with self.assertRaises(IndexError):
+            example_table.head(-1)
+
+        with self.assertRaises(IndexError):
+            example_table.head(m=0)
+
+        with self.assertRaises(IndexError):
+            example_table.head(m=-1)
+
+        with self.assertRaises(IndexError):
+            example_table.head(0, 5)
+
+        with self.assertRaises(IndexError):
+            example_table.head(5, 0)
+
     def test_table_sparse_nparray(self):
         """beat the table sparsely to death"""
         # nparray test
@@ -324,11 +377,11 @@ class TableTests(TestCase):
         t = Table.from_hdf5(h5py.File('test_data/test.biom'))
         os.chdir(cwd)
 
-        npt.assert_equal(t.ids(), ('Sample1', 'Sample2', 'Sample3',
-                                   'Sample4', 'Sample5', 'Sample6'))
+        npt.assert_equal(t.ids(), (u'Sample1', u'Sample2', u'Sample3',
+                                   u'Sample4', u'Sample5', u'Sample6'))
         npt.assert_equal(t.ids(axis='observation'),
-                         ('GG_OTU_1', 'GG_OTU_2', 'GG_OTU_3',
-                          'GG_OTU_4', 'GG_OTU_5'))
+                         (u'GG_OTU_1', u'GG_OTU_2', u'GG_OTU_3',
+                          u'GG_OTU_4', u'GG_OTU_5'))
         exp_obs_md = ({u'taxonomy': [u'k__Bacteria',
                                      u'p__Proteobacteria',
                                      u'c__Gammaproteobacteria',
@@ -402,7 +455,7 @@ class TableTests(TestCase):
     @npt.dec.skipif(HAVE_H5PY is False, msg='H5PY is not installed')
     def test_from_hdf5_sample_subset(self):
         """Parse a sample subset of a hdf5 formatted BIOM table"""
-        samples = ['Sample2', 'Sample4', 'Sample6']
+        samples = [u'Sample2', u'Sample4', u'Sample6']
 
         cwd = os.getcwd()
         if '/' in __file__:
@@ -410,9 +463,9 @@ class TableTests(TestCase):
         t = Table.from_hdf5(h5py.File('test_data/test.biom'), ids=samples)
         os.chdir(cwd)
 
-        npt.assert_equal(t.ids(), ['Sample2', 'Sample4', 'Sample6'])
+        npt.assert_equal(t.ids(), [u'Sample2', u'Sample4', u'Sample6'])
         npt.assert_equal(t.ids(axis='observation'),
-                         ['GG_OTU_2', 'GG_OTU_3', 'GG_OTU_4', 'GG_OTU_5'])
+                         [u'GG_OTU_2', u'GG_OTU_3', u'GG_OTU_4', u'GG_OTU_5'])
         exp_obs_md = ({u'taxonomy': [u'k__Bacteria',
                                      u'p__Cyanobacteria',
                                      u'c__Nostocophycideae',
@@ -466,7 +519,7 @@ class TableTests(TestCase):
     @npt.dec.skipif(HAVE_H5PY is False, msg='H5PY is not installed')
     def test_from_hdf5_observation_subset(self):
         """Parse a observation subset of a hdf5 formatted BIOM table"""
-        observations = ['GG_OTU_1', 'GG_OTU_3', 'GG_OTU_5']
+        observations = [u'GG_OTU_1', u'GG_OTU_3', u'GG_OTU_5']
 
         cwd = os.getcwd()
         if '/' in __file__:
@@ -475,9 +528,10 @@ class TableTests(TestCase):
                             ids=observations, axis='observation')
         os.chdir(cwd)
 
-        npt.assert_equal(t.ids(), ['Sample2', 'Sample3', 'Sample4', 'Sample6'])
+        npt.assert_equal(t.ids(), [u'Sample2', u'Sample3', u'Sample4',
+                                   u'Sample6'])
         npt.assert_equal(t.ids(axis='observation'),
-                         ['GG_OTU_1', 'GG_OTU_3', 'GG_OTU_5'])
+                         [u'GG_OTU_1', u'GG_OTU_3', u'GG_OTU_5'])
         exp_obs_md = ({u'taxonomy': [u'k__Bacteria',
                                      u'p__Proteobacteria',
                                      u'c__Gammaproteobacteria',
@@ -596,7 +650,7 @@ class TableTests(TestCase):
 
         def bc_formatter(grp, category, md, compression):
             name = 'metadata/%s' % category
-            data = np.array([m[category].upper() for m in md])
+            data = np.array([m[category].upper().encode('utf8') for m in md])
             grp.create_dataset(name, shape=data.shape, dtype=H5PY_VLEN_STR,
                                data=data, compression=compression)
 
@@ -717,7 +771,7 @@ class TableTests(TestCase):
         self.assertEqual(sorted(sparse_rich.ids()),
                          sorted(['Fing', 'Key', 'NA']))
         self.assertEqual(sorted(sparse_rich.ids(axis='observation')),
-                         map(str, [0, 1, 3, 4, 7]))
+                         list(map(str, [0, 1, 3, 4, 7])))
         for i, obs_id in enumerate(sparse_rich.ids(axis='observation')):
             if obs_id == '0':
                 self.assertEqual(sparse_rich._observation_metadata[i],
@@ -758,7 +812,7 @@ class TableTests(TestCase):
         self.assertEqual(sorted(sparse_rich.ids()),
                          sorted(['Fing', 'Key', 'NA']))
         self.assertEqual(sorted(sparse_rich.ids(axis='observation')),
-                         map(str, [0, 1, 3, 4, 7]))
+                         list(map(str, [0, 1, 3, 4, 7])))
         for i, obs_id in enumerate(sparse_rich.ids(axis='observation')):
             if obs_id == '0':
                 self.assertEqual(sparse_rich._observation_metadata[i],
@@ -845,10 +899,10 @@ class TableTests(TestCase):
 
     def test_metadata_sample(self):
         """Return the sample metadata"""
-        obs = sorted(self.st_rich.metadata())
-        exp = sorted([{'barcode': 'aatt'}, {'barcode': 'ttgg'}])
+        obs = self.st_rich.metadata()
+        exp = [{'barcode': 'aatt'}, {'barcode': 'ttgg'}]
         for o, e in zip(obs, exp):
-            self.assertEqual(o, e)
+            self.assertDictEqual(o, e)
 
     def test_metadata_observation_id(self):
         """returns the observation metadata for a given id"""
@@ -862,11 +916,10 @@ class TableTests(TestCase):
 
     def test_metadata_observation(self):
         """returns the observation metadata"""
-        obs = sorted(self.st_rich.metadata(axis='observation'))
-        exp = sorted([{'taxonomy': ['k__a', 'p__b']},
-                      {'taxonomy': ['k__a', 'p__c']}])
+        obs = self.st_rich.metadata(axis='observation')
+        exp = [{'taxonomy': ['k__a', 'p__b']}, {'taxonomy': ['k__a', 'p__c']}]
         for o, e in zip(obs, exp):
-            self.assertEqual(o, e)
+            self.assertDictEqual(o, e)
 
     def test_index_invalid_input(self):
         """Correctly handles invalid input."""
@@ -1486,6 +1539,21 @@ class SparseTableTests(TestCase):
         obs = self.st1.update_ids(id_map, axis='observation', inplace=True)
         npt.assert_equal(self.st1._observation_ids, np.array(['41', '42']))
 
+    def test_update_ids_nochange_bug(self):
+        """ids are updated as expected"""
+        # update observation ids
+        exp = self.st1.copy()
+        id_map = {'1': '1', '2': '2'}
+        obs = self.st1.update_ids(id_map, axis='observation', inplace=False)
+        self.assertEqual(obs, exp)
+
+        # test having one ID remain unchanged
+        exp = self.st1.copy()
+        exp._observation_ids = np.array(['1', '3'])
+        id_map = {'1': '1', '2': '3'}
+        obs = self.st1.update_ids(id_map, axis='observation', inplace=False)
+        self.assertEqual(obs, exp)
+
     def test_update_ids_cache_bug(self):
         obs = self.st1.update_ids({'1': 'x', '2': 'y'}, axis='observation',
                                   inplace=False)
@@ -2509,9 +2577,8 @@ class SparseTableTests(TestCase):
         obs_king = dt_rich.collapse(bin_f, norm=False, axis='observation')
         self.assertEqual(obs_king, exp_king)
 
-        self.assertRaises(
-            TableException, dt_rich.collapse, bin_f, min_group_size=10,
-            axis='observation')
+        with errstate(all='raise'), self.assertRaises(TableException):
+            dt_rich.collapse(bin_f, min_group_size=10, axis='observation')
 
         # Test out include_collapsed_metadata=False.
         exp = Table(np.array([[24, 27, 30]]),
@@ -2557,8 +2624,9 @@ class SparseTableTests(TestCase):
             axis='sample').sort(axis='sample')
         self.assertEqual(obs_bc, exp_bc)
 
-        self.assertRaises(TableException, dt_rich.collapse,
-                          bin_f, min_group_size=10)
+        with errstate(all='raise'), self.assertRaises(TableException):
+            dt_rich.collapse(bin_f, min_group_size=10)
+
         # Test out include_collapsed_metadata=False.
         exp = Table(np.array([[12, 6], [18, 9], [24, 12]]),
                     ['1', '2', '3'],
@@ -2776,8 +2844,8 @@ class SparseTableTests(TestCase):
     def test_to_json_dense_int(self):
         """Get a BIOM format string for a dense table of integers"""
         # check by round trip
-        obs_ids = map(str, range(5))
-        samp_ids = map(str, range(10))
+        obs_ids = list(map(str, range(5)))
+        samp_ids = list(map(str, range(10)))
         obs_md = [{'foo': i} for i in range(5)]
         samp_md = [{'bar': i} for i in range(10)]
         data = np.reshape(np.arange(50), (5, 10))
@@ -2812,8 +2880,8 @@ class SparseTableTests(TestCase):
     def test_to_json_dense_int_directio(self):
         """Get a BIOM format string for a dense table of integers"""
         # check by round trip
-        obs_ids = map(str, range(5))
-        samp_ids = map(str, range(10))
+        obs_ids = list(map(str, range(5)))
+        samp_ids = list(map(str, range(10)))
         obs_md = [{'foo': i} for i in range(5)]
         samp_md = [{'bar': i} for i in range(10)]
         data = np.reshape(np.arange(50), (5, 10))
@@ -2854,8 +2922,8 @@ class SparseTableTests(TestCase):
     def test_to_json_sparse_int(self):
         """Get a BIOM format string for a sparse table of integers"""
         # check by round trip
-        obs_ids = map(str, range(5))
-        samp_ids = map(str, range(10))
+        obs_ids = list(map(str, range(5)))
+        samp_ids = list(map(str, range(10)))
         obs_md = [{'foo': i} for i in range(5)]
         samp_md = [{'bar': i} for i in range(10)]
         data = [[0, 0, 10], [1, 1, 11], [2, 2, 12], [3, 3, 13], [4, 4, 14],
@@ -2891,8 +2959,8 @@ class SparseTableTests(TestCase):
     def test_to_json_sparse_int_directio(self):
         """Get a BIOM format string for a sparse table of integers"""
         # check by round trip
-        obs_ids = map(str, range(5))
-        samp_ids = map(str, range(10))
+        obs_ids = list(map(str, range(5)))
+        samp_ids = list(map(str, range(10)))
         obs_md = [{'foo': i} for i in range(5)]
         samp_md = [{'bar': i} for i in range(10)]
         data = [[0, 0, 10], [1, 1, 11], [2, 2, 12], [3, 3, 13], [4, 4, 14],
@@ -2965,7 +3033,7 @@ class SparseTableTests(TestCase):
 
     def test_bin_samples_by_metadata(self):
         """Yield tables binned by sample metadata"""
-        f = lambda id_, md: md['age']
+        f = lambda id_, md: md.get('age', np.inf)
         obs_ids = ['a', 'b', 'c', 'd']
         samp_ids = ['1', '2', '3', '4']
         data = {(0, 0): 1, (0, 1): 2, (0, 2): 3, (0, 3): 4,
@@ -2977,7 +3045,7 @@ class SparseTableTests(TestCase):
         t = Table(data, obs_ids, samp_ids, obs_md, samp_md)
         obs_bins, obs_tables = unzip(t.partition(f))
 
-        exp_bins = (2, 4, None)
+        exp_bins = (2, 4, np.inf)
         exp1_data = {(0, 0): 1, (0, 1): 3, (1, 0): 5, (1, 1): 7, (2, 0): 8,
                      (2, 1): 10, (3, 0): 12, (3, 1): 14}
         exp1_obs_ids = ['a', 'b', 'c', 'd']
@@ -2997,7 +3065,7 @@ class SparseTableTests(TestCase):
         exp3_obs_ids = ['a', 'b', 'c', 'd']
         exp3_samp_ids = ['4']
         exp3_obs_md = [{}, {}, {}, {}]
-        exp3_samp_md = [{'age': None}]
+        exp3_samp_md = [{}]
         exp3 = Table(exp3_data, exp3_obs_ids, exp3_samp_ids, exp3_obs_md,
                      exp3_samp_md)
         exp_tables = (exp1, exp2, exp3)
@@ -3086,8 +3154,10 @@ class SparseTableTests(TestCase):
         exp_phy2 = Table(exp_phy2_data, exp_phy2_obs_ids, exp_phy2_samp_ids,
                          observation_metadata=exp_phy2_obs_md)
         obs_bins, obs_phy = unzip(t.partition(func_phy, axis='observation'))
-        self.assertEqual(obs_phy, [exp_phy1, exp_phy2])
-        self.assertEqual(obs_bins, [('k__a', 'p__b'), ('k__a', 'p__c')])
+        self.assertIn(obs_phy[0], [exp_phy1, exp_phy2])
+        self.assertIn(obs_phy[1], [exp_phy1, exp_phy2])
+        self.assertIn(obs_bins[0], [('k__a', 'p__b'), ('k__a', 'p__c')])
+        self.assertIn(obs_bins[1], [('k__a', 'p__b'), ('k__a', 'p__c')])
 
     def test_get_table_density(self):
         """Test correctly computes density of table."""
@@ -3268,7 +3338,7 @@ class SupportTests2(TestCase):
         obs = list_sparse_to_sparse(ins)
         self.assertEqual((obs != exp).sum(), 0)
 
-legacy_otu_table1 = """# some comment goes here
+legacy_otu_table1 = u"""# some comment goes here
 #OTU id\tFing\tKey\tNA\tConsensus Lineage
 0\t19111\t44536\t42 \tBacteria; Actinobacteria; Actinobacteridae; Propioniba\
 cterineae; Propionibacterium
@@ -3281,7 +3351,7 @@ ae; Corynebacteriaceae
 aphylococcaceae
 4\t589\t2074\t34\tBacteria; Cyanobacteria; Chloroplasts; vectors
 """
-otu_table1 = """# Some comment
+otu_table1 = u"""# Some comment
 #OTU ID\tFing\tKey\tNA\tConsensus Lineage
 0\t19111\t44536\t42\tBacteria; Actinobacteria; Actinobacteridae; \
 Propionibacterineae; Propionibacterium
diff --git a/tests/test_util.py b/tests/test_util.py
index 2a7b542..782544d 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -285,7 +285,7 @@ class UtilTests(TestCase):
 
     def test_biom_open_json(self):
         with biom_open(get_data_path('test.json')) as f:
-            self.assertTrue(isinstance(f, file))
+            self.assertTrue(hasattr(f, 'read'))
 
     def test_biom_open_gz(self):
         with biom_open(get_data_path('test.json.gz')) as f:

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/python-biom-format.git


